Merge "arm/dt: 8226: Add Bus voting from USB"
diff --git a/Documentation/devicetree/bindings/arm/msm/ipc-spinlock.txt b/Documentation/devicetree/bindings/arm/msm/ipc-spinlock.txt
index c71b190..24dbb4b 100644
--- a/Documentation/devicetree/bindings/arm/msm/ipc-spinlock.txt
+++ b/Documentation/devicetree/bindings/arm/msm/ipc-spinlock.txt
@@ -1,14 +1,27 @@
 Qualcomm Interprocessor Communication Spinlock
 
+--Dedicated Hardware Implementation--
 Required properties:
-- compatible : should be "qcom,ipc-spinlock"
+- compatible : should be "qcom,ipc-spinlock-sfpb"
 - reg : the location and size of the spinlock hardware
 - qcom,num-locks : the number of locks supported
 
 Example:
 
 	qcom,ipc-spinlock@fd484000 {
-		compatible = "qcom,ipc-spinlock";
+		compatible = "qcom,ipc-spinlock-sfpb";
 		reg = <0xfd484000 0x1000>;
 		qcom,num-locks = <32>;
 	};
+
+--LDREX Implementation--
+Required properties:
+- compatible : should be "qcom,ipc-spinlock-ldrex"
+- reg : the location and size of the shared lock memory
+
+Example:
+
+	qcom,ipc-spinlock@fa00000 {
+		compatible = "qcom,ipc-spinlock-ldrex";
+		reg = <0xfa00000 0x200000>;
+	};
diff --git a/Documentation/devicetree/bindings/bif/bif.txt b/Documentation/devicetree/bindings/bif/bif.txt
new file mode 100644
index 0000000..c4ff08b
--- /dev/null
+++ b/Documentation/devicetree/bindings/bif/bif.txt
@@ -0,0 +1,22 @@
+BIF (Battery Interface) Controllers
+
+Optional properties:
+- qcom,known-device-addresses:  Specifies a list of integers which correspond to
+                                the 8-bit BIF bus device addresses of BIF slaves
+                                found on the target.
+
+BIF Consumers
+
+Optional properties:
+- qcom,bif-ctrl:                phandle of parent BIF controller device node
+
+Example:
+	foo_ctrl: foo-controller {
+		...
+		qcom,known-device-addresses = <0x80 0x81>;
+	};
+
+	bar-consumer {
+		...
+		qcom,bif-ctrl = <&foo_ctrl>;
+	};
diff --git a/Documentation/devicetree/bindings/bluetooth/bluetooth_power.txt b/Documentation/devicetree/bindings/bluetooth/bluetooth_power.txt
new file mode 100644
index 0000000..88d69e0
--- /dev/null
+++ b/Documentation/devicetree/bindings/bluetooth/bluetooth_power.txt
@@ -0,0 +1,16 @@
+* Bluetooth Controller
+The Bluetooth controller communicates with the Bluetooth Host using the HCI transport layer.
+The HCI transport layer can be based on a UART or USB serial communication protocol.
+
+Required properties:
+  - compatible: Should be "qca,ar3002"
+  - qca,bt-reset-gpio: GPIO pin to bring BT Controller out of reset
+
+Optional properties:
+  None
+
+Example:
+  bt-ar3002 {
+    compatible = "qca,ar3002";
+    qca,bt-reset-gpio = <&pm8941_gpios 34 0>;
+  };
diff --git a/Documentation/devicetree/bindings/bt-fm/fm.txt b/Documentation/devicetree/bindings/bt-fm/fm.txt
new file mode 100644
index 0000000..6bb3599
--- /dev/null
+++ b/Documentation/devicetree/bindings/bt-fm/fm.txt
@@ -0,0 +1,29 @@
+Qualcomm radio iris device
+
+-FM RX playback with no RDS
+
+   FM samples are filtered by external RF chips at baseband and then sent to the Riva-FM core through a serial link.
+   The FM signal is demodulated and the audio L/R samples are stored in memory.
+   The received FM Rx sample data is routed to an external audio codec.
+
+-Audio playback to FM TX
+
+  Used to play an audio source to FM TX.
+  The FM TX module reads the audio samples from memory, and the modulated samples are then sent through the serial interface to the external RF chip.
+
+-RX playback with RDS
+
+  FM Rx receives audio data along with RDS.
+
+-FM TX with RDS
+
+  Used to send RDS messages to an external FM receiver.
+
+Required Properties:
+- compatible: "qcom,iris_fm"
+
+Example:
+	qcom,iris-fm {
+		compatible = "qcom,iris_fm";
+	};
+
diff --git a/Documentation/devicetree/bindings/coresight/coresight.txt b/Documentation/devicetree/bindings/coresight/coresight.txt
index 9635972..48f25de 100644
--- a/Documentation/devicetree/bindings/coresight/coresight.txt
+++ b/Documentation/devicetree/bindings/coresight/coresight.txt
@@ -12,6 +12,7 @@
 
 - compatible : name of the component used for driver matching
 - reg : physical base address and length of the register set(s) of the component
+- reg-names: names corresponding to each reg property value
 - coresight-id : unique integer identifier for the component
 - coresight-name : unique descriptive name of the component
 - coresight-nr-inports : number of input ports on the component
@@ -31,17 +32,23 @@
 			 component
 - coresight-child-ports : list of input port numbers of the children
 - coresight-default-sink : represents the default compile time CoreSight sink
+- coresight-ctis : list of ctis that this component interacts with
 - qcom,pc-save : program counter save implemented
 - qcom,blk-size : block size for tmc-etr to usb transfers
 - qcom,round-robin : indicates if per core etms are allowed round-robin access
 		     by the funnel
+- qcom,reset-flush-race : indicates if a race exists between flushing and ddr
+			  being put into self-refresh during watchdog reset
+- qcom,write-64bit : only 64bit data writes supported by stm
 
 Examples:
 
 1. Sinks
 	tmc_etr: tmc@fc322000 {
 		compatible = "arm,coresight-tmc";
-		reg = <0xfc322000 0x1000>;
+		reg = <0xfc322000 0x1000>,
+		      <0xfc37c000 0x3000>;
+		reg-names = "tmc-etr-base", "tmc-etr-bam-base";
 
 		coresight-id = <0>;
 		coresight-name = "coresight-tmc-etr";
@@ -52,6 +59,7 @@
 	tpiu: tpiu@fc318000 {
 		compatible = "arm,coresight-tpiu";
 		reg = <0xfc318000 0x1000>;
+		reg-names = "tpiu-base";
 
 		coresight-id = <1>;
 		coresight-name = "coresight-tpiu";
@@ -62,6 +70,7 @@
 	funnel_merg: funnel@fc31b000 {
 		compatible = "arm,coresight-funnel";
 		reg = <0xfc31b000 0x1000>;
+		reg-names = "funnel-merg-base";
 
 		coresight-id = <4>;
 		coresight-name = "coresight-funnel-merg";
@@ -74,6 +83,7 @@
 	funnel_in0: funnel@fc319000 {
 		compatible = "arm,coresight-funnel";
 		reg = <0xfc319000 0x1000>;
+		reg-names = "funnel-in0-base";
 
 		coresight-id = <5>;
 		coresight-name = "coresight-funnel-in0";
@@ -88,6 +98,7 @@
 		compatible = "arm,coresight-stm";
 		reg = <0xfc321000 0x1000>,
 		      <0xfa280000 0x180000>;
+		reg-names = "stm-base", "stm-data-base";
 
 		coresight-id = <9>;
 		coresight-name = "coresight-stm";
@@ -100,6 +111,7 @@
 	etm0: etm@fc33c000 {
 		compatible = "arm,coresight-etm";
 		reg = <0xfc33c000 0x1000>;
+		reg-names = "etm0-base";
 
 		coresight-id = <10>;
 		coresight-name = "coresight-etm0";
@@ -110,3 +122,24 @@
 		qcom,pc-save;
 		qcom,round-robin;
 	};
+
+4. Miscellaneous
+	cti0: cti@fc308000 {
+		compatible = "arm,coresight-cti";
+		reg = <0xfc308000 0x1000>;
+		reg-names = "cti0-base";
+
+		coresight-id = <15>;
+		coresight-name = "coresight-cti0";
+		coresight-nr-inports = <0>;
+	};
+
+	cti1: cti@fc309000 {
+		compatible = "arm,coresight-cti";
+		reg = <0xfc309000 0x1000>;
+		reg-names = "cti1-base";
+
+		coresight-id = <16>;
+		coresight-name = "coresight-cti1";
+		coresight-nr-inports = <0>;
+	};
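+
+A component that interacts with CTIs, such as the ETR sink shown in section 1,
+would additionally reference them through the coresight-ctis property listed
+above. A minimal sketch (the CTI phandles used here are illustrative and
+target specific):
+
+	tmc_etr: tmc@fc322000 {
+		...
+		coresight-ctis = <&cti0 &cti1>;
+	};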
diff --git a/Documentation/devicetree/bindings/crypto/msm/qcedev.txt b/Documentation/devicetree/bindings/crypto/msm/qcedev.txt
index 2cc2696..bf97e80 100644
--- a/Documentation/devicetree/bindings/crypto/msm/qcedev.txt
+++ b/Documentation/devicetree/bindings/crypto/msm/qcedev.txt
@@ -6,6 +6,12 @@
   - reg-names : should contain the crypto and bam base register names.
   - interrupts : should contain crypto BAM interrupt.
   - qcom,bam-pipe-pair : should contain crypto BAM pipe pair index.
+  - qcom,ce-hw-instance : should contain crypto HW instance.
+  - qcom,msm-bus,name: Should be "qcedev-noc"
+  - qcom,msm-bus,num-cases: Number of bus scaling use cases defined in the vectors property
+  - qcom,msm-bus,active-only: Default vector index
+  - qcom,msm-bus,num-paths: Number of paths (source/destination port pairs) in each use case
+  - qcom,msm-bus,vectors-KBps: Bus vectors, specified as <src dst ab ib> values for each use case.
 
 Example:
 
@@ -16,4 +22,12 @@
 		reg-names = "crypto-base","crypto-bam-base";
 		interrupts = <0 235 0>;
 		qcom,bam-pipe-pair = <0>;
+		qcom,ce-hw-instance = <1>;
+		qcom,msm-bus,name = "qcedev-noc";
+		qcom,msm-bus,num-cases = <2>;
+		qcom,msm-bus,active-only = <0>;
+		qcom,msm-bus,num-paths = <1>;
+		qcom,msm-bus,vectors-KBps =
+				<56 512 0 0>,
+				<56 512 3936000 393600>;
 	};
diff --git a/Documentation/devicetree/bindings/crypto/msm/qcrypto.txt b/Documentation/devicetree/bindings/crypto/msm/qcrypto.txt
index 4f9dd06..c99262b 100644
--- a/Documentation/devicetree/bindings/crypto/msm/qcrypto.txt
+++ b/Documentation/devicetree/bindings/crypto/msm/qcrypto.txt
@@ -5,7 +5,13 @@
   - reg : should contain crypto, BAM register map.
   - reg-names : should contain the crypto and bam base register names.
   - interrupts : should contain crypto BAM interrupt.
-  - qcom,bam-pipe-pair : should contain crypto BAM pipe pair.
+  - qcom,bam-pipe-pair : should contain crypto BAM pipe pair index.
+  - qcom,ce-hw-instance : should contain crypto HW instance.
+  - qcom,msm-bus,name: Should be "qcrypto-noc"
+  - qcom,msm-bus,num-cases: Number of bus scaling use cases defined in the vectors property
+  - qcom,msm-bus,active-only: Default vector index
+  - qcom,msm-bus,num-paths: Number of paths (source/destination port pairs) in each use case
+  - qcom,msm-bus,vectors-KBps: Bus vectors, specified as <src dst ab ib> values for each use case.
 
 Example:
 
@@ -16,4 +22,12 @@
 		reg-names = "crypto-base","crypto-bam-base";
 		interrupts = <0 235 0>;
 		qcom,bam-pipe-pair = <1>;
+		qcom,ce-hw-instance = <1>;
+		qcom,msm-bus,name = "qcrypto-noc";
+		qcom,msm-bus,num-cases = <2>;
+		qcom,msm-bus,active-only = <0>;
+		qcom,msm-bus,num-paths = <1>;
+		qcom,msm-bus,vectors-KBps =
+				<56 512 0 0>,
+				<56 512 3936000 393600>;
 	};
diff --git a/Documentation/devicetree/bindings/fb/mdss-dsi-panel.txt b/Documentation/devicetree/bindings/fb/mdss-dsi-panel.txt
index 94746b8..0588c5e 100644
--- a/Documentation/devicetree/bindings/fb/mdss-dsi-panel.txt
+++ b/Documentation/devicetree/bindings/fb/mdss-dsi-panel.txt
@@ -56,6 +56,7 @@
 - qcom,enable-gpio:			Specifies the panel lcd/display enable gpio.
 - qcom,rst-gpio:			Specifies the panel reset gpio.
 - qcom,mdss-pan-broadcast-mode:		Boolean used to enable broadcast mode.
+- qcom,cont-splash-enabled:		Boolean used to enable continuous splash mode.
 - qcom,mdss-pan-porch-values:		An array of size 6 that specifies the panel blanking values.
 - qcom,mdss-pan-underflow-clr:		Specifies the controller settings for the panel underflow clear
 					settings. Default value is 0xff.
diff --git a/Documentation/devicetree/bindings/gpu/adreno.txt b/Documentation/devicetree/bindings/gpu/adreno.txt
index 1e47c02..0004302 100644
--- a/Documentation/devicetree/bindings/gpu/adreno.txt
+++ b/Documentation/devicetree/bindings/gpu/adreno.txt
@@ -24,7 +24,11 @@
 				KGSL_CLK_AXI    0x00000020
 
 Bus Scaling Data:
-- qcom,grp3d-vectors:	A series of 4 cell properties, format of which is:
+- qcom,msm-bus,name: String property to describe the name of the 3D graphics processor.
+- qcom,msm-bus,num-cases: This is the number of Bus Scaling use cases defined in the vectors property.
+- qcom,msm-bus,active-only: A boolean flag indicating if it is active only.
+- qcom,msm-bus,num-paths: This represents the number of paths in each Bus Scaling Usecase.
+- qcom,msm-bus,vectors-KBps: A series of 4 cell properties, format of which is:
 						<src dst ab ib>, <src dst ab ib>, // For Bus Scaling Usecase 1
 						<src dst ab ib>, <src dst ab ib>, // For Bus Scaling Usecase 2
 						<..  ..  .. ..>, <..  ..  .. ..>; // For Bus Scaling Usecase n
@@ -41,8 +45,6 @@
 						1 = MSM_BUS_SLAVE_OCMEM
 					ab: Represents aggregated bandwidth. This value is 0 for Graphics.
 					ib: Represents instantaneous bandwidth. This value has a range <0 8000 MB/s>
-- qcom,grp3d-num-vectors-per-usecase:	This represents the number of vectors in each Bus Scaling Usecase.
-- qcom,grp3d-num-bus-scale-usecases:	This is the the number of Bus Scaling use cases defined in the vectors property
 
 GDSC Oxili Regulators:
 - vddcx-supply:			Phandle for vddcx regulator device node.
@@ -93,12 +95,17 @@
 		qcom,clk-map = <0x00000016>; //KGSL_CLK_CORE | KGSL_CLK_IFACE | KGSL_CLK_MEM_IFACE
 
 		/* Bus Scale Settings */
-		qcom,grp3d-vectors = <0 0 0 0>, <2 1 0 0>,
-				<0 0 0 2000>, <2 1 0 3000>,
-				<0 0 0 4000>, <2 1 0 5000>,
-				<0 0 0 6400>, <2 1 0 7600>;
-		qcom,grp3d-num-vectors-per-usecase = <2>;
-		qcom,grp3d-num-bus-scale-usecases = <4>;
+		qcom,msm-bus,name = "grp3d";
+		qcom,msm-bus,num-cases = <6>;
+		qcom,msm-bus,active-only = <0>;
+		qcom,msm-bus,num-paths = <2>;
+		qcom,msm-bus,vectors-KBps =
+				<26 512 0 0>, <89 604 0 0>,
+				<26 512 0 2200000>, <89 604 0 3000000>,
+				<26 512 0 4000000>, <89 604 0 3000000>,
+				<26 512 0 4000000>, <89 604 0 4500000>,
+				<26 512 0 6400000>, <89 604 0 4500000>,
+				<26 512 0 6400000>, <89 604 0 7600000>;
 
 		/* GDSC oxili regulators */
 		vddcx-supply = <&gdsc_oxili_cx>;
diff --git a/Documentation/devicetree/bindings/input/qpnp-keypad.txt b/Documentation/devicetree/bindings/input/qpnp-keypad.txt
new file mode 100644
index 0000000..8f7fbe7
--- /dev/null
+++ b/Documentation/devicetree/bindings/input/qpnp-keypad.txt
@@ -0,0 +1,57 @@
+Qualcomm QPNP keypad controller
+
+The qpnp-keypad driver supports the PMIC keypad controller module
+in the Qualcomm PMICs. This controller supports 10 x 8 (row x col)
+configuration and is connected to the host processor on the
+SPMI interface.
+
+Required properties:
+- compatible:		Must be "qcom,qpnp-keypad"
+- reg:			Specifies the SPMI address and size for the keypad controller
+- interrupts:		Specifies the interrupt associated with keypad controller
+- interrupt-names: 	The names of the 2 interrupts associated with the keypad
+			controller. They are - "kp-sense" and "kp-stuck".
+- keypad,num-rows:	Number of rows used in the keypad configuration. These
+			rows are the number of PMIC gpios configured as drive
+			lines. Possible values: Max = 10, Min = 2.
+- keypad,num-columns:	Number of columns used in the keypad configuration. These
+			cols are number of PMIC gpios configured as sense lines.
+			Possible values: Max = 8, Min = 1.
+- linux,keymap:		Row-column-keycode mapping. It is an array of packed
+			entries containing the equivalent of row, column and
+			linux key-code. Each value represented as
+			(row << 24 | column << 16 | key-code)
+
+Optional Properties:
+- qcom,scan-delay-ms:	Wait time in milliseconds before each keypad scan.
+			This is used to determine if the key has been stuck.
+			Possible values: 1, 2, 4, 8, 16, 32, 64, 128ms.
+- qcom,row-hold-ns:	Wait time in nanoseconds between each row assertion.
+			Configured based on last-row scan delay.
+			Possible values: 31250, 62500, 125000, 250000ns.
+- qcom,debounce-ms:	Wait time in milliseconds before the column data is
+			sampled for key press detection.
+			Possible values: 5, 10, 15, 20ms.
+- qcom,wakeup:		Configure the keypad as a wakeup source. This is a
+			boolean property.
+- linux,keypad-no-autorepeat:
+			Disables the auto-repeat feature for the keys. This
+			is a boolean property.
+
+Example:
+
+	qcom,keypad@a800 {
+		compatible = "qcom,qpnp-keypad";
+		reg = <0xA800 0x100>;
+		interrupts = <0x1 0xA8 0x0>,
+			     <0x1 0xA8 0x1>;
+		interrupt-names = "kp-sense", "kp-stuck";
+		keypad,num-rows = <2>;
+		keypad,num-columns = <2>;
+		qcom,scan-delay-ms = <128>;
+		qcom,row-hold-ns = <31250>;
+		qcom,debounce-ms = <20>;
+		qcom,wakeup;
+		linux,keymap = <0x00000001 0x00010002
+			       0x01000003 0x01010004>;
+	};
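+
+The linux,keymap values in the example above follow the
+(row << 24 | column << 16 | key-code) encoding described earlier. A commented
+sketch of the same entries (the key codes 1-4 are illustrative and not
+mandated by this binding):
+
+	linux,keymap = <0x00000001	/* row 0, column 0, key-code 1 */
+			0x00010002	/* row 0, column 1, key-code 2 */
+			0x01000003	/* row 1, column 0, key-code 3 */
+			0x01010004>;	/* row 1, column 1, key-code 4 */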
diff --git a/Documentation/devicetree/bindings/input/touchscreen/atmel-mxt-ts.txt b/Documentation/devicetree/bindings/input/touchscreen/atmel-mxt-ts.txt
index bcea355..6fe88a9 100644
--- a/Documentation/devicetree/bindings/input/touchscreen/atmel-mxt-ts.txt
+++ b/Documentation/devicetree/bindings/input/touchscreen/atmel-mxt-ts.txt
@@ -17,11 +17,15 @@
  - atmel,family-id	: family identification of the controller
  - atmel,variant-id	: variant identification of the controller
  - atmel,version	: firmware version of the controller
- - atmel,build	i	: firmware build number of the controller
- - atmel,bootldr-id	: bootloader identification of the controller
- - atmel,fw-name	: firmware name to used for flashing firmware
+ - atmel,build		: firmware build number of the controller
+
+Required for firmware update only:
+ - atmel,fw-name		: firmware name to use for flashing firmware
+ - atmel,bootldr-id		: bootloader identification of the controller
 
 Optional property:
+ - atmel,bl-addr		: bootloader address; by default it is looked up
+					in the mxt_slave_addresses structure
  - atmel,config			: configuration parameter for the controller
  - atmel,i2c-pull-up		: specify to indicate pull up is needed
  - vcc_i2c-supply		: Power source required to pull up i2c bus
diff --git a/Documentation/devicetree/bindings/iommu/msm_iommu_v0.txt b/Documentation/devicetree/bindings/iommu/msm_iommu_v0.txt
index ea2d43a..cc1ffc2 100644
--- a/Documentation/devicetree/bindings/iommu/msm_iommu_v0.txt
+++ b/Documentation/devicetree/bindings/iommu/msm_iommu_v0.txt
@@ -7,6 +7,10 @@
 - qcom,glb-offset : Offset for the global register base.
 
 Optional properties:
+- interrupts : should contain the performance monitor overflow interrupt number.
+- qcom,iommu-pmu-ngroups: Number of Performance Monitor Unit (PMU) groups.
+- qcom,iommu-pmu-ncounters: Number of PMU counters per group.
+- qcom,iommu-pmu-event-classes: List of event classes supported.
 - List of sub nodes, one for each of the translation context banks supported.
   Each sub node has the following required properties:
 
@@ -28,6 +32,11 @@
 		ranges;
 		reg = <0xfd890000 0x10000>;
 		qcom,glb-offset = <0xF000>;
+		interrupts = <0 38 0>;
+		qcom,iommu-pmu-ngroups = <1>;
+		qcom,iommu-pmu-ncounters = <4>;
+		qcom,iommu-pmu-event-classes = <0x08
+						0x11>;
 
 		qcom,iommu-ctx@fd000000 {
 			reg = <0xfd000000 0x1000>;
diff --git a/Documentation/devicetree/bindings/media/video/msm-vidc.txt b/Documentation/devicetree/bindings/media/video/msm-vidc.txt
index cd14056..f97e063 100644
--- a/Documentation/devicetree/bindings/media/video/msm-vidc.txt
+++ b/Documentation/devicetree/bindings/media/video/msm-vidc.txt
@@ -31,6 +31,14 @@
   request by different video encoder usecases.
 - qcom,dec-ddr-ab-ib : list of bus vectors(ab, ib pair) for ddr bandwidth
   request by different video decoder usecases.
+- qcom,iommu-groups : list of IOMMU groups to be used.  Groups are defined as
+  phandles in <target>-iommu-domains.dtsi (e.g. msm8974-v1-iommu-domains.dtsi)
+- qcom,iommu-group-buffer-types : bitmap of buffer types that can be mapped into
+  the corresponding IOMMU group. Buffer types are defined within the vidc driver
+  by "enum hal_buffer" in msm_smem.h
+- qcom,buffer-type-tz-usage-table : a key-value pair, mapping a buffer type
+  (enum hal_buffer) to its corresponding TZ usage. The TZ usages are defined
+  as "enum cp_mem_usage" in include/linux/msm_ion.h
 
 Example:
 
@@ -59,4 +67,8 @@
 			<60000 664950>;
 		qcom,dec-ddr-ab-ib = <0 0>,
 			<110000 909000>;
+		qcom,iommu-groups = <&venus_domain_ns &venus_domain_cp>;
+		qcom,iommu-group-buffer-types = <0xfff 0x1ff>;
+		qcom,buffer-type-tz-usage-table = <0x1 0x1>,
+						<0x1fe 0x2>;
 	};
diff --git a/Documentation/devicetree/bindings/memory.txt b/Documentation/devicetree/bindings/memory.txt
new file mode 100644
index 0000000..e98ee05
--- /dev/null
+++ b/Documentation/devicetree/bindings/memory.txt
@@ -0,0 +1,106 @@
+* Memory binding
+
+The /memory node provides basic information about the address and size
+of the physical memory. This node is usually filled or updated by the
+bootloader, depending on the actual memory configuration of the given
+hardware.
+
+The memory layout is described by the following node:
+
+memory {
+	reg =  <(baseaddr1) (size1)
+		(baseaddr2) (size2)
+		...
+		(baseaddrN) (sizeN)>;
+};
+
+baseaddrX:	the base address of the defined memory bank
+sizeX:		the size of the defined memory bank
+
+More than one memory bank can be defined.
+
+
+* Memory regions
+
+In the /memory node one can create additional nodes describing particular
+memory regions, usually for special usage by various device drivers.
+Good examples are contiguous memory allocations or memory sharing with
+another operating system on the same hardware board. Those special memory
+regions might depend on the board configuration and the devices used on the
+target system.
+
+Parameters for each memory region can be encoded into the device tree
+with the following convention:
+
+(name): region@(base-address) {
+	reg = <(baseaddr) (size)>;
+	(linux,contiguous-region);
+	(linux,default-contiguous-region);
+	label = (unique_name);
+};
+
+name:		a name given to the defined region.
+base-address:	the base address of the defined region.
+size:		the size of the memory region.
+linux,contiguous-region: property indicating that the defined memory
+		region is used for contiguous memory allocations,
+		Linux specific (optional)
+linux,default-contiguous-region: property indicating that the region
+		is the default region for all contiguous memory
+		allocations, Linux specific (optional)
+label:		an internal name used for automatically associating the
+		cma region with a given device. The label is optional;
+		if the label is not given the client is responsible for
+		calling the appropriate functions to associate the region
+		with a device.
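+
+For instance, a region meant to be associated automatically with a device by
+its label could be declared as follows (a sketch; the region name, address,
+size and label below are illustrative):
+
+	camera_mem: region@7c000000 {
+		linux,contiguous-region;
+		reg = <0x7c000000 0x2000000>;
+		label = "camera_region";
+	};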
+
+* Device nodes
+
+Once the regions in the /memory node are defined, they can be assigned
+to some device nodes for their special use. The following
+properties are defined:
+
+linux,contiguous-region = <&phandle>;
+	This property indicates that the device driver should use the
+	memory region pointed to by the given phandle.
+
+
+* Example:
+
+This example defines a memory layout consisting of 4 memory banks. 2 contiguous
+regions are defined for the Linux kernel, one default for all device drivers
+(named contig_mem, placed at 0x72000000, 64MiB) and one dedicated to the
+framebuffer device (named display_mem, placed at 0x78000000, 16MiB). The
+display_mem region is then assigned to the fb@12300000 device for contiguous
+memory allocation with Linux kernel drivers.
+
+The reason for creating a separate region for the framebuffer device is to
+match the framebuffer address configured by the bootloader, so that once the
+Linux kernel drivers start, no glitches appear on the displayed boot logo.
+
+/ {
+	/* ... */
+	memory {
+		reg =  <0x40000000 0x10000000
+			0x50000000 0x10000000
+			0x60000000 0x10000000
+			0x70000000 0x10000000>;
+
+		contig_mem: region@72000000 {
+			linux,contiguous-region;
+			linux,default-contiguous-region;
+			reg = <0x72000000 0x4000000>;
+		};
+
+		display_mem: region@78000000 {
+			linux,contiguous-region;
+			reg = <0x78000000 0x1000000>;
+		};
+	};
+
+	fb@12300000 {
+		linux,contiguous-region = <&display_mem>;
+		status = "okay";
+	};
+};
diff --git a/Documentation/devicetree/bindings/platform/msm/ipa.txt b/Documentation/devicetree/bindings/platform/msm/ipa.txt
index 5e311be..3cd29e4 100644
--- a/Documentation/devicetree/bindings/platform/msm/ipa.txt
+++ b/Documentation/devicetree/bindings/platform/msm/ipa.txt
@@ -13,9 +13,11 @@
        registers.
 - reg-names: "ipa-base" - string to identify the IPA CORE base registers.
 	     "bam-base" - string to identify the IPA BAM base registers.
+	     "a2-bam-base" - string to identify the A2 BAM base registers.
 - interrupts: Specifies the interrupt associated with IPA.
 - interrupt-names: "ipa-irq" - string to identify the IPA core interrupt.
                    "bam-irq" - string to identify the IPA BAM interrupt.
+                   "a2-bam-irq" - string to identify the A2 BAM interrupt.
 - qcom,ipa-hw-ver: Specifies the IPA hardware version.
 
 IPA pipe sub nodes (A2 static pipes configurations):
@@ -49,10 +51,12 @@
 	compatible = "qcom,ipa";
 	reg = <0xfd4c0000 0x26000>,
 	      <0xfd4c4000 0x14818>;
-	reg-names = "ipa-base", "bam-base";
+	      <0xfc834000 0x7000>;
+	reg-names = "ipa-base", "bam-base", "a2-bam-base";
 	interrupts = <0 252 0>,
 	             <0 253 0>;
-	interrupt-names = "ipa-irq", "bam-irq";
+	             <0 29 1>;
+	interrupt-names = "ipa-irq", "bam-irq", "a2-bam-irq";
 	qcom,ipa-hw-ver = <1>;
 
 	qcom,pipe1 {
diff --git a/Documentation/devicetree/bindings/platform/msm/ssm.txt b/Documentation/devicetree/bindings/platform/msm/ssm.txt
new file mode 100644
index 0000000..8fb3356
--- /dev/null
+++ b/Documentation/devicetree/bindings/platform/msm/ssm.txt
@@ -0,0 +1,30 @@
+* Qualcomm Secure Service Module (SSM)
+
+SSM provides an interface for an OEM driver to communicate with the modem and
+trustzone.
+
+This module provides the following features:
+ - Key exchange between the modem and trustzone for encryption/decryption
+   of mode information
+ - Interface for third-party drivers to send mode updates to the modem
+ - Interface for loading the trustzone application
+
+Required properties:
+- compatible:		Must be "qcom,ssm"
+
+Optional properties:
+- qcom,channel-name:	Name of the SMD channel used for communication
+			between MODEM and SSM driver.
+- qcom,need-keyexhg:	This property controls initial key exchange
+			between APPS (application processor) and MODEM.
+			If not mentioned the initial key exchange is
+			not required.
+			If this property is mentioned then it is mandatory
+			for modem to perform initial key exchange with APPS.
+
+Example:
+	qcom,ssm {
+		compatible = "qcom,ssm";
+		qcom,channel-name = "SSM_RTR";
+		qcom,need-keyexhg;
+	};
diff --git a/Documentation/devicetree/bindings/regulator/krait-regulator.txt b/Documentation/devicetree/bindings/regulator/krait-regulator.txt
index f057834..c783ac8 100644
--- a/Documentation/devicetree/bindings/regulator/krait-regulator.txt
+++ b/Documentation/devicetree/bindings/regulator/krait-regulator.txt
@@ -1,5 +1,20 @@
 Krait Voltage regulators
 
+The CPUs are powered using a single supply driven by PMIC ganged regulators operating in
+different phases. Individual Krait cores can further draw power from the single supply via
+an LDO or a head switch (BHS). The first level node represents the PMIC ganged regulator
+and its properties and encompasses second level nodes that represent the individual
+Krait LDO/BHS control regulators.
+
+[First Level Nodes]
+Required properties:
+- compatible:			Must be "qcom,krait-pdn"
+
+Optional properties:
+- qcom,use-phase-switching:	indicates whether the driver should add/shed phases on the PMIC
+				ganged regulator as cpus are hotplugged.
+
+[Second Level Nodes]
 Required properties:
 - compatible:			Must be "qcom,krait-regulator"
 - reg:				Specifies the address and size for this regulator device,
@@ -27,19 +42,26 @@
 binding, defined in regulator.txt, can also be used.
 
 Example:
-	krait0_vreg: regulator@f9088000 {
-		compatible = "qcom,krait-regulator";
-		regulator-name = "krait0";
-		reg = <0xf9088000 0x1000>, /* APCS_ALIAS0_KPSS_ACS */
-			<0xf908a800 0x1000>; /* APCS_ALIAS0_KPSS_MDD */
-		reg-names = "acs", "mdd";
-		regulator-min-microvolt = <500000>;
-		regulator-max-microvolt = <1100000>;
-		qcom,headroom-voltage = <150000>;
-		qcom,retention-voltage = <745000>;
-		qcom,ldo-default-voltage = <745000>;
-		qcom,ldo-threshold-voltage = <750000>;
-		qcom,ldo-delta-voltage = <50000>;
-		qcom,cpu-num = 0;
-	};
+	krait_pdn: krait-pdn {
+		compatible = "qcom,krait-pdn";
+		qcom,use-phase-switching;
+		#address-cells = <1>;
+		#size-cells = <1>;
+		ranges;
 
+		krait0_vreg: regulator@f9088000 {
+			compatible = "qcom,krait-regulator";
+			regulator-name = "krait0";
+			reg = <0xf9088000 0x1000>, /* APCS_ALIAS0_KPSS_ACS */
+				<0xf908a800 0x1000>; /* APCS_ALIAS0_KPSS_MDD */
+			reg-names = "acs", "mdd";
+			regulator-min-microvolt = <500000>;
+			regulator-max-microvolt = <1100000>;
+			qcom,headroom-voltage = <150000>;
+			qcom,retention-voltage = <675000>;
+			qcom,ldo-default-voltage = <750000>;
+			qcom,ldo-threshold-voltage = <850000>;
+			qcom,ldo-delta-voltage = <50000>;
+			qcom,cpu-num = <0>;
+		};
+	};
diff --git a/Documentation/devicetree/bindings/usb/msm-hsusb.txt b/Documentation/devicetree/bindings/usb/msm-hsusb.txt
index 9d0b0a5..2cdc7ff 100644
--- a/Documentation/devicetree/bindings/usb/msm-hsusb.txt
+++ b/Documentation/devicetree/bindings/usb/msm-hsusb.txt
@@ -52,6 +52,13 @@
 - qcom,hsusb-otg-clk-always-on-workaround: If present then USB core clocks
 	    remain active upon receiving bus suspend and USB cable is connected.
 	    Used for allowing USB to respond for remote wakup.
+- <supply-name>-supply: handle to the regulator device tree node.
+         Required supply names are "HSUSB_VDDCX" (when voting for VDDCX) or
+         "hsusb_vdd_dig" (when voting for the VDDCX corner voltage), plus
+         "HSUSB_1p8" and "HSUSB_3p3".
+- qcom,vdd-voltage-level: This property must be a list of three integer
+	values (no, min, max) where each value represents either a voltage
+	in microvolts or a value corresponding to voltage corner.
 
 Example HSUSB OTG controller device node :
 	usb@f9690000 {
@@ -72,6 +79,10 @@
 		qcom,hsusb-otg-pmic-id-irq = <47>
 		qcom,hsusb-otg-lpm-on-dev-suspend;
 		qcom,hsusb-otg-clk-always-on-workaround;
+		hsusb_vdd_dig-supply = <&pm8226_s1_corner>;
+		HSUSB_1p8-supply = <&pm8226_l10>;
+		HSUSB_3p3-supply = <&pm8226_l20>;
+		qcom,vdd-voltage-level = <1 5 7>;
 
 		qcom,msm_bus,name = "usb2";
 		qcom,msm_bus,num_cases = <2>;
diff --git a/Documentation/printk-formats.txt b/Documentation/printk-formats.txt
index 5df176e..6f53742 100644
--- a/Documentation/printk-formats.txt
+++ b/Documentation/printk-formats.txt
@@ -53,6 +53,14 @@
 	For printing struct resources. The 'R' and 'r' specifiers result in a
 	printed resource with ('R') or without ('r') a decoded flags member.
 
+Physical addresses:
+
+	%pa	0x01234567 or 0x0123456789abcdef
+
+	For printing a phys_addr_t type (and its derivatives, such as
+	resource_size_t) which can vary based on build options, regardless of
+	the width of the CPU data path. Passed by reference.
+
 MAC/FDDI addresses:
 
 	%pM	00:01:02:03:04:05
@@ -134,9 +142,9 @@
 	printk("%lld", (long long)s64_var);
 
 If <type> is dependent on a config option for its size (e.g., sector_t,
-blkcnt_t, phys_addr_t, resource_size_t) or is architecture-dependent
-for its size (e.g., tcflag_t), use a format specifier of its largest
-possible type and explicitly cast to it.  Example:
+blkcnt_t) or is architecture-dependent for its size (e.g., tcflag_t), use a
+format specifier of its largest possible type and explicitly cast to it.
+Example:
 
 	printk("test: sector number/total blocks: %llu/%llu\n",
 		(unsigned long long)sector, (unsigned long long)blockcount);
diff --git a/arch/arm/Kconfig.debug b/arch/arm/Kconfig.debug
index ed18cae..3a9b770 100644
--- a/arch/arm/Kconfig.debug
+++ b/arch/arm/Kconfig.debug
@@ -63,27 +63,6 @@
 	      8 - SIGSEGV faults
 	     16 - SIGBUS faults
 
-config DEBUG_RODATA
-	bool "Write protect kernel text section"
-	default n
-	depends on DEBUG_KERNEL && MMU
-	---help---
-	  Mark the kernel text section as write-protected in the pagetables,
-	  in order to catch accidental (and incorrect) writes to such const
-	  data. This will cause the size of the kernel, plus up to 4MB, to
-	  be mapped as pages instead of sections, which will increase TLB
-	  pressure.
-	  If in doubt, say "N".
-
-config DEBUG_RODATA_TEST
-	bool "Testcase for the DEBUG_RODATA feature"
-	depends on DEBUG_RODATA
-	default n
-	---help---
-	  This option enables a testcase for the DEBUG_RODATA
-	  feature.
-	  If in doubt, say "N"
-
 # These options are only for real kernel hackers who want to get their hands dirty.
 config DEBUG_LL
 	bool "Kernel low-level debugging functions (read help!)"
diff --git a/arch/arm/boot/dts/msm-iommu-v0.dtsi b/arch/arm/boot/dts/msm-iommu-v0.dtsi
index 59b2a90..0c44fb5 100644
--- a/arch/arm/boot/dts/msm-iommu-v0.dtsi
+++ b/arch/arm/boot/dts/msm-iommu-v0.dtsi
@@ -17,8 +17,16 @@
 		#size-cells = <1>;
 		ranges;
 		reg = <0xfd000000 0x10000>;
+		interrupts = <0 248 0>;
 		qcom,glb-offset = <0xF000>;
 		label = "lpass_iommu";
+		qcom,iommu-pmu-ngroups = <1>;
+		qcom,iommu-pmu-ncounters = <4>;
+		qcom,iommu-pmu-event-classes = <0x08
+						0x09
+						0x10
+						0x12
+						0x80>;
 		status = "disabled";
 
 		lpass_q6_fw: qcom,iommu-ctx@fd000000 {
@@ -56,8 +64,16 @@
 		#size-cells = <1>;
 		ranges;
 		reg = <0xfd010000 0x10000>;
+		interrupts = <0 252 0>;
 		qcom,glb-offset = <0xF000>;
 		label = "copss_iommu";
+		qcom,iommu-pmu-ngroups = <1>;
+		qcom,iommu-pmu-ncounters = <4>;
+		qcom,iommu-pmu-event-classes = <0x08
+						0x09
+						0x10
+						0x12
+						0x80>;
 		status = "disabled";
 
 		qcom,iommu-ctx@fd010000 {
@@ -123,8 +139,16 @@
 		#size-cells = <1>;
 		ranges;
 		reg = <0xfd860000 0x10000>;
+		interrupts = <0 245 0>;
 		qcom,glb-offset = <0xF000>;
 		label = "mdpe_iommu";
+		qcom,iommu-pmu-ngroups = <1>;
+		qcom,iommu-pmu-ncounters = <4>;
+		qcom,iommu-pmu-event-classes = <0x08
+						0x09
+						0x10
+						0x12
+						0x80>;
 		status = "disabled";
 
 		qcom,iommu-ctx@fd860000 {
@@ -148,20 +172,28 @@
 		#size-cells = <1>;
 		ranges;
 		reg = <0xfd870000 0x10000>;
+		interrupts = <0 73 0>;
 		qcom,glb-offset = <0xF000>;
 		label = "mdps_iommu";
+		qcom,iommu-pmu-ngroups = <1>;
+		qcom,iommu-pmu-ncounters = <4>;
+		qcom,iommu-pmu-event-classes = <0x08
+						0x09
+						0x10
+						0x12
+						0x80>;
 		status = "disabled";
 
 		qcom,iommu-ctx@fd870000 {
 			reg = <0xfd870000 0x1000>;
-			interrupts = <0 247 0>;
+			interrupts = <0 47 0>;
 			qcom,iommu-ctx-mids = <0>;
 			label = "mdps_0";
 		};
 
 		qcom,iommu-ctx@fd871000 {
 			reg = <0xfd871000 0x1000>;
-			interrupts = <0 247 0>;
+			interrupts = <0 47 0>;
 			qcom,iommu-ctx-mids = <1>;
 			label = "mdps_1";
 		};
@@ -173,8 +205,16 @@
 		#size-cells = <1>;
 		ranges;
 		reg = <0xfd880000 0x10000>;
+		interrupts = <0 38 0>;
 		qcom,glb-offset = <0xF000>;
 		label = "gfx_iommu";
+		qcom,iommu-pmu-ngroups = <1>;
+		qcom,iommu-pmu-ncounters = <4>;
+		qcom,iommu-pmu-event-classes = <0x08
+						0x09
+						0x10
+						0x12
+						0x80>;
 		status = "disabled";
 
 		qcom,iommu-ctx@fd880000 {
@@ -207,8 +247,16 @@
 		#size-cells = <1>;
 		ranges;
 		reg = <0xfd890000 0x10000>;
+		interrupts = <0 62 0>;
 		qcom,glb-offset = <0xF000>;
 		label = "vfe_iommu";
+		qcom,iommu-pmu-ngroups = <1>;
+		qcom,iommu-pmu-ncounters = <4>;
+		qcom,iommu-pmu-event-classes = <0x08
+						0x09
+						0x10
+						0x12
+						0x80>;
 		status = "disabled";
 
 		qcom,iommu-ctx@fd890000 {
diff --git a/arch/arm/boot/dts/msm-pm8226.dtsi b/arch/arm/boot/dts/msm-pm8226.dtsi
index de23f4c..6a07bad 100644
--- a/arch/arm/boot/dts/msm-pm8226.dtsi
+++ b/arch/arm/boot/dts/msm-pm8226.dtsi
@@ -22,6 +22,33 @@
 		#address-cells = <1>;
 		#size-cells = <1>;
 
+		qcom,power-on@800 {
+			compatible = "qcom,qpnp-power-on";
+			reg = <0x800 0x100>;
+			interrupts = <0x0 0x8 0x0>,
+				     <0x0 0x8 0x1>,
+				     <0x0 0x8 0x4>;
+			interrupt-names = "kpdpwr", "resin", "resin-bark";
+			qcom,pon-dbc-delay = <15625>;
+			qcom,system-reset;
+
+			qcom,pon_1 {
+				qcom,pon-type = <0>;
+				qcom,pull-up = <1>;
+				linux,code = <116>;
+			};
+
+			qcom,pon_2 {
+				qcom,pon-type = <1>;
+				qcom,support-reset = <1>;
+				qcom,pull-up = <1>;
+				qcom,s1-timer = <0>;
+				qcom,s2-timer = <2000>;
+				qcom,s2-type = <1>;
+				linux,code = <114>;
+			};
+		};
+
 		pm8226_gpios: gpios {
 			spmi-dev-container;
 			compatible = "qcom,qpnp-pin";
diff --git a/arch/arm/boot/dts/msm-pm8941.dtsi b/arch/arm/boot/dts/msm-pm8941.dtsi
index c1d8664..54f603d 100644
--- a/arch/arm/boot/dts/msm-pm8941.dtsi
+++ b/arch/arm/boot/dts/msm-pm8941.dtsi
@@ -632,7 +632,7 @@
 				qcom,pre-div-channel-scaling = <0>;
 				qcom,calibration-type = "ratiometric";
 				qcom,scale-function = <1>;
-				qcom,hw-settle-time = <0>;
+				qcom,hw-settle-time = <2>;
 				qcom,fast-avg-setup = <0>;
 			};
 
@@ -643,7 +643,7 @@
 				qcom,pre-div-channel-scaling = <0>;
 				qcom,calibration-type = "ratiometric";
 				qcom,scale-function = <0>;
-				qcom,hw-settle-time = <0>;
+				qcom,hw-settle-time = <2>;
 				qcom,fast-avg-setup = <0>;
 			};
 
@@ -654,7 +654,7 @@
 				qcom,pre-div-channel-scaling = <0>;
 				qcom,calibration-type = "ratiometric";
 				qcom,scale-function = <4>;
-				qcom,hw-settle-time = <0>;
+				qcom,hw-settle-time = <2>;
 				qcom,fast-avg-setup = <0>;
 			};
 
@@ -665,7 +665,7 @@
 				qcom,pre-div-channel-scaling = <0>;
 				qcom,calibration-type = "ratiometric";
 				qcom,scale-function = <2>;
-				qcom,hw-settle-time = <0>;
+				qcom,hw-settle-time = <2>;
 				qcom,fast-avg-setup = <0>;
 			};
 
@@ -676,7 +676,7 @@
 				qcom,pre-div-channel-scaling = <0>;
 				qcom,calibration-type = "ratiometric";
 				qcom,scale-function = <2>;
-				qcom,hw-settle-time = <0>;
+				qcom,hw-settle-time = <2>;
 				qcom,fast-avg-setup = <0>;
 			};
 
@@ -687,7 +687,7 @@
 				qcom,pre-div-channel-scaling = <0>;
 				qcom,calibration-type = "ratiometric";
 				qcom,scale-function = <2>;
-				qcom,hw-settle-time = <0>;
+				qcom,hw-settle-time = <2>;
 				qcom,fast-avg-setup = <0>;
 			};
 
@@ -698,7 +698,7 @@
 				qcom,pre-div-channel-scaling = <0>;
 				qcom,calibration-type = "ratiometric";
 				qcom,scale-function = <2>;
-				qcom,hw-settle-time = <0>;
+				qcom,hw-settle-time = <2>;
 				qcom,fast-avg-setup = <0>;
 			};
 
@@ -709,7 +709,7 @@
 				qcom,pre-div-channel-scaling = <0>;
 				qcom,calibration-type = "ratiometric";
 				qcom,scale-function = <2>;
-				qcom,hw-settle-time = <0>;
+				qcom,hw-settle-time = <2>;
 				qcom,fast-avg-setup = <0>;
 			};
 
@@ -720,7 +720,7 @@
 				qcom,pre-div-channel-scaling = <0>;
 				qcom,calibration-type = "ratiometric";
 				qcom,scale-function = <0>;
-				qcom,hw-settle-time = <0>;
+				qcom,hw-settle-time = <2>;
 				qcom,fast-avg-setup = <0>;
 			};
 		};
@@ -770,7 +770,7 @@
 				qcom,pre-div-channel-scaling = <0>;
 				qcom,calibration-type = "ratiometric";
 				qcom,scale-function = <2>;
-				qcom,hw-settle-time = <0>;
+				qcom,hw-settle-time = <2>;
 				qcom,fast-avg-setup = <0>;
 				qcom,btm-channel-number = <0x48>;
 			};
@@ -782,7 +782,7 @@
 				qcom,pre-div-channel-scaling = <0>;
 				qcom,calibration-type = "ratiometric";
 				qcom,scale-function = <1>;
-				qcom,hw-settle-time = <0xf>;
+				qcom,hw-settle-time = <2>;
 				qcom,fast-avg-setup = <0>;
 				qcom,btm-channel-number = <0x68>;
 			};
@@ -794,7 +794,7 @@
 				qcom,pre-div-channel-scaling = <0>;
 				qcom,calibration-type = "absolute";
 				qcom,scale-function = <2>;
-				qcom,hw-settle-time = <0>;
+				qcom,hw-settle-time = <2>;
 				qcom,fast-avg-setup = <0>;
 				qcom,btm-channel-number = <0x70>;
 			};
@@ -806,7 +806,7 @@
 				qcom,pre-div-channel-scaling = <0>;
 				qcom,calibration-type = "ratiometric";
 				qcom,scale-function = <2>;
-				qcom,hw-settle-time = <0>;
+				qcom,hw-settle-time = <2>;
 				qcom,fast-avg-setup = <0>;
 				qcom,btm-channel-number = <0x78>;
 			};
@@ -818,7 +818,7 @@
 				qcom,pre-div-channel-scaling = <0>;
 				qcom,calibration-type = "ratiometric";
 				qcom,scale-function = <2>;
-				qcom,hw-settle-time = <0>;
+				qcom,hw-settle-time = <2>;
 				qcom,fast-avg-setup = <0>;
 				qcom,btm-channel-number = <0x80>;
 			};
diff --git a/arch/arm/boot/dts/msm8226-cdp.dts b/arch/arm/boot/dts/msm8226-cdp.dts
index 7263e42..f01491f 100644
--- a/arch/arm/boot/dts/msm8226-cdp.dts
+++ b/arch/arm/boot/dts/msm8226-cdp.dts
@@ -19,6 +19,69 @@
 	qcom,msm-id = <145 1 0>;
 
 	serial@f991f000 {
-		status = "disabled";
+		status = "ok";
 	};
 };
+
+&sdcc1 {
+	vdd-supply = <&pm8226_l17>;
+	qcom,vdd-always-on;
+	qcom,vdd-lpm-sup;
+	qcom,vdd-voltage-level = <2950000 2950000>;
+	qcom,vdd-current-level = <800 500000>;
+
+	vdd-io-supply = <&pm8226_l6>;
+	qcom,vdd-io-always-on;
+	qcom,vdd-io-voltage-level = <1800000 1800000>;
+	qcom,vdd-io-current-level = <250 154000>;
+
+	qcom,pad-pull-on = <0x0 0x3 0x3>; /* no-pull, pull-up, pull-up */
+	qcom,pad-pull-off = <0x0 0x3 0x3>; /* no-pull, pull-up, pull-up */
+	qcom,pad-drv-on = <0x4 0x4 0x4>; /* 16mA, 10mA, 10mA */
+	qcom,pad-drv-off = <0x0 0x0 0x0>; /* 2mA, 2mA, 2mA */
+
+	qcom,clk-rates = <400000 25000000 50000000 100000000 200000000>;
+	qcom,sup-voltages = <2950 2950>;
+
+	qcom,bus-speed-mode = "HS200_1p8v", "DDR_1p8v";
+	qcom,nonremovable;
+
+	status = "ok";
+};
+
+&sdcc2 {
+	vdd-supply = <&pm8226_l18>;
+	qcom,vdd-voltage-level = <2950000 2950000>;
+	qcom,vdd-current-level = <9000 800000>;
+
+	vdd-io-supply = <&pm8226_l21>;
+	qcom,vdd-io-always-on;
+	qcom,vdd-io-lpm-sup;
+	qcom,vdd-io-voltage-level = <1800000 2950000>;
+	qcom,vdd-io-current-level = <6 22000>;
+
+	qcom,pad-pull-on = <0x0 0x3 0x3>; /* no-pull, pull-up, pull-up */
+	qcom,pad-pull-off = <0x0 0x3 0x3>; /* no-pull, pull-up, pull-up */
+	qcom,pad-drv-on = <0x4 0x4 0x4>; /* 16mA, 10mA, 10mA */
+	qcom,pad-drv-off = <0x0 0x0 0x0>; /* 2mA, 2mA, 2mA */
+
+	qcom,clk-rates = <400000 25000000 50000000 100000000 200000000>;
+	qcom,sup-voltages = <2950 2950>;
+
+	qcom,xpc;
+	qcom,bus-speed-mode = "SDR12", "SDR25", "SDR50", "DDR50", "SDR104";
+	qcom,current-limit = <600>;
+
+	#address-cells = <0>;
+	interrupt-parent = <&sdcc2>;
+	interrupts = <0 1 2>;
+	#interrupt-cells = <1>;
+	interrupt-map-mask = <0xffffffff>;
+	interrupt-map = <0 &intc 0 125 0
+			1 &intc 0 220 0
+			2 &msmgpio 38 0x3>;
+	interrupt-names = "core_irq", "bam_irq", "status_irq";
+	cd-gpios = <&msmgpio 38 0x1>;
+
+	status = "ok";
+};
diff --git a/arch/arm/boot/dts/msm8226-fluid.dts b/arch/arm/boot/dts/msm8226-fluid.dts
index 966157e..af86922 100644
--- a/arch/arm/boot/dts/msm8226-fluid.dts
+++ b/arch/arm/boot/dts/msm8226-fluid.dts
@@ -16,7 +16,7 @@
 / {
 	model = "Qualcomm MSM 8226 FLUID";
 	compatible = "qcom,msm8226-fluid", "qcom,msm8226";
-	qcom,msm-id = <145 1 0>;
+	qcom,msm-id = <145 3 0>;
 
 	serial@f991f000 {
 		status = "disabled";
diff --git a/arch/arm/boot/dts/msm8226-gpu.dtsi b/arch/arm/boot/dts/msm8226-gpu.dtsi
index 76e934e..2734726 100644
--- a/arch/arm/boot/dts/msm8226-gpu.dtsi
+++ b/arch/arm/boot/dts/msm8226-gpu.dtsi
@@ -15,6 +15,13 @@
 &msm_gpu {
 	qcom,chipid = <0x03000510>;
 
+	qcom,clk-map = <0x00000016>; /* KGSL_CLK_CORE | KGSL_CLK_IFACE | KGSL_CLK_MEM_IFACE */
+
+	/* Bus Scale Settings */
+	qcom,msm-bus,name = "grp3d";
+	qcom,msm-bus,num-cases = <4>;
+	qcom,msm-bus,active-only = <0>;
+	qcom,msm-bus,num-paths = <2>;
 	qcom,msm-bus,vectors-KBps =
 			<26 512 0 0>, <89 604 0 0>,
 			<26 512 0 1600000>, <89 604 0 6400000>,
diff --git a/arch/arm/boot/dts/msm8226-iommu.dtsi b/arch/arm/boot/dts/msm8226-iommu.dtsi
index d23d324..51c2f38 100644
--- a/arch/arm/boot/dts/msm8226-iommu.dtsi
+++ b/arch/arm/boot/dts/msm8226-iommu.dtsi
@@ -14,24 +14,216 @@
 
 &jpeg_iommu {
 	status = "ok";
+
+	qcom,iommu-bfb-regs =  <0x604c
+				0x6050
+				0x6514
+				0x6540
+				0x656c
+				0x6314
+				0x6394
+				0x6414
+				0x60ac
+				0x615c
+				0x620c
+				0x6008
+				0x600c
+				0x6010
+				0x6014>;
+
+	qcom,iommu-bfb-data =  <0x0000ffff
+				0x00000000
+				0x4
+				0x4
+				0x0
+				0x0
+				0x10
+				0x50
+				0x0
+				0x10
+				0x20
+				0x0
+				0x0
+				0x0
+				0x0>;
 };
 
 &mdp_iommu {
 	status = "ok";
 	/* HACK: set to -1 during pre-si due to lack of TZ */
 	qcom,iommu-secure-id = <0xFFFFFFFF>;
+
+	qcom,iommu-bfb-regs =  <0x604c
+				0x6050
+				0x6514
+				0x6540
+				0x656c
+				0x60ac
+				0x615c
+				0x620c
+				0x6314
+				0x6394
+				0x6414
+				0x6008
+				0x600c
+				0x6010
+				0x6014
+				0x6018
+				0x601c
+				0x6020>;
+
+	qcom,iommu-bfb-data =  <0xffffffff
+				0x00000000
+				0x00000004
+				0x00000010
+				0x00000000
+				0x00000000
+				0x00000013
+				0x00000017
+				0x0
+				0x13
+				0x23
+				0x0
+				0x0
+				0x0
+				0x0
+				0x0
+				0x0
+				0x0>;
 };
 
 &venus_iommu {
 	status = "ok";
 	/* HACK: set to -1 during pre-si due to lack of TZ */
 	qcom,iommu-secure-id = <0xFFFFFFFF>;
+
+	qcom,iommu-bfb-regs =  <0x604c
+				0x6050
+				0x6514
+				0x6540
+				0x656c
+				0x60ac
+				0x615c
+				0x620c
+				0x6314
+				0x6394
+				0x6414
+				0x6008
+				0x600c
+				0x6010
+				0x6014
+				0x6018
+				0x601c
+				0x6020
+				0x6024
+				0x6028
+				0x602c
+				0x6030
+				0x6034
+				0x6038>;
+
+	qcom,iommu-bfb-data =  <0xffffffff
+				0xffffffff
+				0x00000004
+				0x00000008
+				0x00000000
+				0x00000000
+				0x00000094
+				0x000000b4
+				0x0
+				0x94
+				0x114
+				0x0
+				0x0
+				0x0
+				0x0
+				0x0
+				0x0
+				0x0
+				0x0
+				0x0
+				0x0
+				0x0
+				0x0
+				0x0>;
+};
+
+&venus_ns {
+	   qcom,iommu-ctx-sids = <0 1 2 3 4 5 7>;
+};
+
+&venus_cp {
+	   qcom,iommu-ctx-sids = <0x80 0x81 0x82 0x83 0x84>;
 };
 
 &kgsl_iommu {
 	status = "ok";
+
+	qcom,iommu-bfb-regs =  <0x604c
+				0x6050
+				0x6514
+				0x6540
+				0x656c
+				0x60ac
+				0x615c
+				0x620c
+				0x6314
+				0x6394
+				0x6414
+				0x6008>;
+
+	qcom,iommu-bfb-data =  <0x00000003
+				0x0
+				0x00000004
+				0x00000010
+				0x00000000
+				0x00000000
+				0x00000001
+				0x00000011
+				0x0
+				0x1
+				0x41
+				0x0>;
 };
 
 &vfe_iommu {
 	status = "ok";
+
+	qcom,iommu-bfb-regs =  <0x604c
+				0x6050
+				0x6514
+				0x6540
+				0x656c
+				0x6314
+				0x6394
+				0x6414
+				0x60ac
+				0x615c
+				0x620c
+				0x6008
+				0x600c
+				0x6010
+				0x6014
+				0x6018
+				0x601c
+				0x6020>;
+
+	qcom,iommu-bfb-data =  <0xffffffff
+				0x00000000
+				0x4
+				0x8
+				0x0
+				0x0
+				0x1b
+				0x5b
+				0x0
+				0x1b
+				0x2b
+				0x0
+				0x0
+				0x0
+				0x0
+				0x0
+				0x0
+				0x0>;
 };
diff --git a/arch/arm/boot/dts/msm8226-mtp.dts b/arch/arm/boot/dts/msm8226-mtp.dts
index f3f2108..0242540 100644
--- a/arch/arm/boot/dts/msm8226-mtp.dts
+++ b/arch/arm/boot/dts/msm8226-mtp.dts
@@ -16,9 +16,69 @@
 / {
 	model = "Qualcomm MSM 8226 MTP";
 	compatible = "qcom,msm8226-mtp", "qcom,msm8226";
-	qcom,msm-id = <145 1 0>;
+	qcom,msm-id = <145 8 0>;
 
 	serial@f991f000 {
-		status = "disabled";
+		status = "ok";
 	};
-};
\ No newline at end of file
+};
+
+&sdcc1 {
+	vdd-supply = <&pm8226_l17>;
+	qcom,vdd-always-on;
+	qcom,vdd-lpm-sup;
+	qcom,vdd-voltage-level = <2950000 2950000>;
+	qcom,vdd-current-level = <800 500000>;
+
+	vdd-io-supply = <&pm8226_l6>;
+	qcom,vdd-io-always-on;
+	qcom,vdd-io-voltage-level = <1800000 1800000>;
+	qcom,vdd-io-current-level = <250 154000>;
+
+	qcom,pad-pull-on = <0x0 0x3 0x3>; /* no-pull, pull-up, pull-up */
+	qcom,pad-pull-off = <0x0 0x3 0x3>; /* no-pull, pull-up, pull-up */
+	qcom,pad-drv-on = <0x4 0x4 0x4>; /* 16mA, 10mA, 10mA */
+	qcom,pad-drv-off = <0x0 0x0 0x0>; /* 2mA, 2mA, 2mA */
+
+	qcom,clk-rates = <400000 25000000 50000000 100000000 200000000>;
+	qcom,sup-voltages = <2950 2950>;
+
+	qcom,bus-speed-mode = "HS200_1p8v", "DDR_1p8v";
+	qcom,nonremovable;
+
+	status = "ok";
+};
+
+&sdcc2 {
+	vdd-supply = <&pm8226_l18>;
+	qcom,vdd-voltage-level = <2950000 2950000>;
+	qcom,vdd-current-level = <9000 800000>;
+
+	vdd-io-supply = <&pm8226_l21>;
+	qcom,vdd-io-always-on;
+	qcom,vdd-io-lpm-sup;
+	qcom,vdd-io-voltage-level = <1800000 2950000>;
+	qcom,vdd-io-current-level = <6 22000>;
+
+	qcom,pad-pull-on = <0x0 0x3 0x3>; /* no-pull, pull-up, pull-up */
+	qcom,pad-pull-off = <0x0 0x3 0x3>; /* no-pull, pull-up, pull-up */
+	qcom,pad-drv-on = <0x4 0x4 0x4>; /* 16mA, 10mA, 10mA */
+	qcom,pad-drv-off = <0x0 0x0 0x0>; /* 2mA, 2mA, 2mA */
+
+	qcom,clk-rates = <400000 25000000 50000000 100000000 200000000>;
+	qcom,sup-voltages = <2950 2950>;
+
+	qcom,xpc;
+	qcom,bus-speed-mode = "SDR12", "SDR25", "SDR50", "DDR50", "SDR104";
+	qcom,current-limit = <600>;
+
+	#address-cells = <0>;
+	interrupt-parent = <&sdcc2>;
+	interrupts = <0 1 2>;
+	#interrupt-cells = <1>;
+	interrupt-map-mask = <0xffffffff>;
+	interrupt-map = <0 &intc 0 125 0
+			1 &intc 0 220 0
+			2 &msmgpio 38 0x3>;
+	interrupt-names = "core_irq", "bam_irq", "status_irq";
+	cd-gpios = <&msmgpio 38 0x1>;
+
+	status = "ok";
+};
diff --git a/arch/arm/boot/dts/msm8226-qrd.dts b/arch/arm/boot/dts/msm8226-qrd.dts
index 14bf60b..65d4b33 100644
--- a/arch/arm/boot/dts/msm8226-qrd.dts
+++ b/arch/arm/boot/dts/msm8226-qrd.dts
@@ -16,9 +16,72 @@
 / {
 	model = "Qualcomm MSM 8226 QRD";
 	compatible = "qcom,msm8226-qrd", "qcom,msm8226";
-	qcom,msm-id = <145 1 0>;
+	qcom,msm-id = <145 11 0>;
 
 	serial@f991f000 {
-		status = "disabled";
+		status = "ok";
 	};
-};
\ No newline at end of file
+};
+
+&sdcc1 {
+	vdd-supply = <&pm8226_l17>;
+	qcom,vdd-always-on;
+	qcom,vdd-lpm-sup;
+	qcom,vdd-voltage-level = <2950000 2950000>;
+	qcom,vdd-current-level = <800 500000>;
+
+	vdd-io-supply = <&pm8226_l6>;
+	qcom,vdd-io-always-on;
+	qcom,vdd-io-voltage-level = <1800000 1800000>;
+	qcom,vdd-io-current-level = <250 154000>;
+
+	qcom,pad-pull-on = <0x0 0x3 0x3>; /* no-pull, pull-up, pull-up */
+	qcom,pad-pull-off = <0x0 0x3 0x3>; /* no-pull, pull-up, pull-up */
+	qcom,pad-drv-on = <0x4 0x4 0x4>; /* 16mA, 10mA, 10mA */
+	qcom,pad-drv-off = <0x0 0x0 0x0>; /* 2mA, 2mA, 2mA */
+
+	qcom,clk-rates = <400000 25000000 50000000 100000000 200000000>;
+	qcom,sup-voltages = <2950 2950>;
+
+	qcom,bus-speed-mode = "HS200_1p8v", "DDR_1p8v";
+	qcom,nonremovable;
+
+	status = "ok";
+};
+
+&sdcc2 {
+	vdd-supply = <&pm8226_l18>;
+	qcom,vdd-voltage-level = <2950000 2950000>;
+	qcom,vdd-current-level = <9000 800000>;
+
+	vdd-io-supply = <&pm8226_l21>;
+	qcom,vdd-io-always-on;
+	qcom,vdd-io-lpm-sup;
+	qcom,vdd-io-voltage-level = <1800000 2950000>;
+	qcom,vdd-io-current-level = <6 22000>;
+
+	qcom,pad-pull-on = <0x0 0x3 0x3>; /* no-pull, pull-up, pull-up */
+	qcom,pad-pull-off = <0x0 0x3 0x3>; /* no-pull, pull-up, pull-up */
+	qcom,pad-drv-on = <0x4 0x4 0x4>; /* 16mA, 10mA, 10mA */
+	qcom,pad-drv-off = <0x0 0x0 0x0>; /* 2mA, 2mA, 2mA */
+
+	qcom,clk-rates = <400000 25000000 50000000 100000000 200000000>;
+	qcom,sup-voltages = <2950 2950>;
+
+	qcom,xpc;
+	qcom,bus-speed-mode = "SDR12", "SDR25", "SDR50", "DDR50", "SDR104";
+	qcom,current-limit = <600>;
+
+	#address-cells = <0>;
+	interrupt-parent = <&sdcc2>;
+	interrupts = <0 1 2>;
+	#interrupt-cells = <1>;
+	interrupt-map-mask = <0xffffffff>;
+	interrupt-map = <0 &intc 0 125 0
+			1 &intc 0 220 0
+			2 &msmgpio 38 0x3>;
+	interrupt-names = "core_irq", "bam_irq", "status_irq";
+	cd-gpios = <&msmgpio 38 0x1>;
+
+	status = "ok";
+};
diff --git a/arch/arm/boot/dts/msm8226-regulator.dtsi b/arch/arm/boot/dts/msm8226-regulator.dtsi
index 50d2dba..8168826 100644
--- a/arch/arm/boot/dts/msm8226-regulator.dtsi
+++ b/arch/arm/boot/dts/msm8226-regulator.dtsi
@@ -13,7 +13,7 @@
 /* Stub Regulators */
 
 / {
-	pm8026_s1_corner: regulator-s1-corner {
+	pm8226_s1_corner: regulator-s1-corner {
 		compatible = "qcom,stub-regulator";
 		regulator-name = "8226_s1_corner";
 		qcom,hpm-min-load = <100000>;
@@ -35,8 +35,8 @@
 			qcom,enable-time = <500>;
 			qcom,system-load = <100000>;
 			regulator-always-on;
-			regulator-min-microvolt = <1287500>;
-			regulator-max-microvolt = <1287500>;
+			regulator-min-microvolt = <1150000>;
+			regulator-max-microvolt = <1150000>;
 		};
 
 		pm8226_s2: regulator@1700 {
@@ -45,7 +45,7 @@
 			qcom,enable-time = <500>;
 			qcom,system-load = <100000>;
 			regulator-always-on;
-			regulator-min-microvolt = <1150000>;
+			regulator-min-microvolt = <1050000>;
 			regulator-max-microvolt = <1150000>;
 		};
 
@@ -104,8 +104,8 @@
 			qcom,system-load = <10000>;
 			regulator-always-on;
 			qcom,enable-time = <200>;
-			regulator-min-microvolt = <1287500>;
-			regulator-max-microvolt = <1287500>;
+			regulator-min-microvolt = <1150000>;
+			regulator-max-microvolt = <1150000>;
 		};
 
 		pm8226_l4: regulator@4300 {
diff --git a/arch/arm/boot/dts/msm8226-sim.dts b/arch/arm/boot/dts/msm8226-sim.dts
index f9ab957..b6590b3 100644
--- a/arch/arm/boot/dts/msm8226-sim.dts
+++ b/arch/arm/boot/dts/msm8226-sim.dts
@@ -17,7 +17,7 @@
 / {
 	model = "Qualcomm MSM 8226 Simulator";
 	compatible = "qcom,msm8226-sim", "qcom,msm8226";
-	qcom,msm-id = <145 1 0>;
+	qcom,msm-id = <145 16 0>;
 
 	serial@f991f000 {
 		status = "ok";
diff --git a/arch/arm/boot/dts/msm8226.dtsi b/arch/arm/boot/dts/msm8226.dtsi
index fea5082..741ffbd 100644
--- a/arch/arm/boot/dts/msm8226.dtsi
+++ b/arch/arm/boot/dts/msm8226.dtsi
@@ -44,6 +44,10 @@
 		qcom,direct-connect-irqs = <8>;
 	};
 
+	aliases {
+		spi0 = &spi_0;
+	};
+
 	timer {
 		compatible = "arm,armv7-timer";
 		interrupts = <1 2 0 1 3 0>;
@@ -81,9 +85,10 @@
 		reg = <0xf9a55000 0x400>;
 		interrupts = <0 134 0>, <0 140 0>;
 		interrupt-names = "core_irq", "async_irq";
-		HSUSB_VDDCX-supply = <&pm8226_s1>;
+		hsusb_vdd_dig-supply = <&pm8226_s1_corner>;
 		HSUSB_1p8-supply = <&pm8226_l10>;
 		HSUSB_3p3-supply = <&pm8226_l20>;
+		qcom,vdd-voltage-level = <1 5 7>;
 
 		qcom,hsusb-otg-phy-type = <2>;
 		qcom,hsusb-otg-mode = <1>;
@@ -528,7 +533,7 @@
 		reg = <0xfe200000 0x00100>,
 		      <0xfd485100 0x00010>;
 		reg-names = "qdsp6_base", "halt_base";
-		vdd_cx-supply = <&pm8026_s1_corner>;
+		vdd_cx-supply = <&pm8226_s1_corner>;
 		interrupts = <0 162 1>;
 
 		qcom,firmware-name = "adsp";
@@ -536,7 +541,7 @@
 
 	qcom,msm-mem-hole {
 		compatible = "qcom,msm-mem-hole";
-		qcom,memblock-remove = <0x8100000 0x7e00000>; /* Address and Size of Hole */
+		qcom,memblock-remove = <0x8400000 0x7b00000>; /* Address and Size of Hole */
 	};
 
 	tsens: tsens@fc4a8000 {
@@ -545,8 +550,8 @@
 		      <0xfc4b8000 0x1000>;
 		reg-names = "tsens_physical", "tsens_eeprom_physical";
 		interrupts = <0 184 0>;
-		qcom,sensors = <7>;
-		qcom,slope = <3200 3200 3200 3200 3200 3200 3200>;
+		qcom,sensors = <4>;
+		qcom,slope = <2901 2846 3038 2955>;
 		qcom,calib-mode = "fuse_map2";
 	};
 
@@ -559,6 +564,34 @@
 		qcom,freq-step = <2>;
 	};
 
+	spi_0: spi@f9923000 { /* BLSP1 QUP1 */
+		compatible = "qcom,spi-qup-v2";
+		#address-cells = <1>;
+		#size-cells = <0>;
+		reg-names = "spi_physical", "spi_bam_physical";
+		reg = <0xf9923000 0x1000>,
+		      <0xf9904000 0xF000>;
+		interrupt-names = "spi_irq", "spi_bam_irq";
+		interrupts = <0 95 0>, <0 238 0>;
+		spi-max-frequency = <19200000>;
+
+		gpios = <&msmgpio 3 0>, /* CLK  */
+			<&msmgpio 1 0>, /* MISO */
+			<&msmgpio 0 0>; /* MOSI */
+		cs-gpios = <&msmgpio 2 0>;
+
+		qcom,infinite-mode = <0>;
+		qcom,use-bam;
+		qcom,ver-reg-exists;
+		qcom,bam-consumer-pipe-index = <12>;
+		qcom,bam-producer-pipe-index = <13>;
+	};
+
+	qcom,bam_dmux@fc834000 {
+		compatible = "qcom,bam_dmux";
+		reg = <0xfc834000 0x7000>;
+		interrupts = <0 29 1>;
+	};
 };
 
 &gdsc_venus {
@@ -651,7 +684,7 @@
 		qcom,pre-div-channel-scaling = <0>;
 		qcom,calibration-type = "ratiometric";
 		qcom,scale-function = <1>;
-		qcom,hw-settle-time = <0>;
+		qcom,hw-settle-time = <2>;
 		qcom,fast-avg-setup = <0>;
 	};
 
@@ -673,7 +706,7 @@
 		qcom,pre-div-channel-scaling = <0>;
 		qcom,calibration-type = "ratiometric";
 		qcom,scale-function = <4>;
-		qcom,hw-settle-time = <0>;
+		qcom,hw-settle-time = <2>;
 		qcom,fast-avg-setup = <0>;
 	};
 };
diff --git a/arch/arm/boot/dts/msm8610-rumi.dts b/arch/arm/boot/dts/msm8610-rumi.dts
index af8ce2e..a4507e3 100644
--- a/arch/arm/boot/dts/msm8610-rumi.dts
+++ b/arch/arm/boot/dts/msm8610-rumi.dts
@@ -17,7 +17,7 @@
 / {
 	model = "Qualcomm MSM 8610 Rumi";
 	compatible = "qcom,msm8610-rumi", "qcom,msm8610";
-	qcom,msm-id = <147 1 0>;
+	qcom,msm-id = <147 15 0>;
 
 	serial@f991f000 {
 		status = "ok";
diff --git a/arch/arm/boot/dts/msm8610-sim.dts b/arch/arm/boot/dts/msm8610-sim.dts
index 73ba807..2268daf 100644
--- a/arch/arm/boot/dts/msm8610-sim.dts
+++ b/arch/arm/boot/dts/msm8610-sim.dts
@@ -17,7 +17,7 @@
 / {
 	model = "Qualcomm MSM 8610 Simulator";
 	compatible = "qcom,msm8610-sim", "qcom,msm8610";
-	qcom,msm-id = <147 1 0>;
+	qcom,msm-id = <147 16 0>;
 
 	serial@f991f000 {
 		status = "ok";
diff --git a/arch/arm/boot/dts/msm8610.dtsi b/arch/arm/boot/dts/msm8610.dtsi
index 9ff383f..ce6011b 100644
--- a/arch/arm/boot/dts/msm8610.dtsi
+++ b/arch/arm/boot/dts/msm8610.dtsi
@@ -41,6 +41,10 @@
 		qcom,direct-connect-irqs = <8>;
 	};
 
+	aliases {
+		spi0 = &spi_0;
+	};
+
 	timer {
 		compatible = "arm,armv7-timer";
 		interrupts = <1 2 0 1 3 0>;
@@ -283,6 +287,30 @@
 		qcom,i2c-bus-freq = <100000>;
 	};
 
+
+	spi_0: spi@f9923000 { /* BLSP1 QUP1 */
+		compatible = "qcom,spi-qup-v2";
+		#address-cells = <1>;
+		#size-cells = <0>;
+		reg-names = "spi_physical", "spi_bam_physical";
+		reg = <0xf9923000 0x1000>,
+		      <0xf9904000 0xF000>;
+		interrupt-names = "spi_irq", "spi_bam_irq";
+		interrupts = <0 95 0>, <0 238 0>;
+		spi-max-frequency = <19200000>;
+
+		gpios = <&msmgpio 3 0>, /* CLK  */
+			<&msmgpio 1 0>, /* MISO */
+			<&msmgpio 0 0>; /* MOSI */
+		cs-gpios = <&msmgpio 2 0>;
+
+		qcom,infinite-mode = <0>;
+		qcom,use-bam;
+		qcom,ver-reg-exists;
+		qcom,bam-consumer-pipe-index = <12>;
+		qcom,bam-producer-pipe-index = <13>;
+	};
+
 	qcom,pronto@fb21b000 {
 		compatible = "qcom,pil-pronto";
 		reg = <0xfb21b000 0x3000>,
diff --git a/arch/arm/boot/dts/msm8974-cdp.dtsi b/arch/arm/boot/dts/msm8974-cdp.dtsi
index f5f7fbd..0acfaf6 100644
--- a/arch/arm/boot/dts/msm8974-cdp.dtsi
+++ b/arch/arm/boot/dts/msm8974-cdp.dtsi
@@ -230,7 +230,7 @@
 				linux,default-trigger = "bkl-trigger";
 				qcom,cs-out-en;
 				qcom,op-fdbck;
-				qcom,default-state = "off";
+				qcom,default-state = "on";
 				qcom,max-current = <25>;
 				qcom,ctrl-delay-us = <0>;
 				qcom,boost-curr-lim = <3>;
diff --git a/arch/arm/boot/dts/msm8974-coresight.dtsi b/arch/arm/boot/dts/msm8974-coresight.dtsi
index 427ef0b..5df8f10 100644
--- a/arch/arm/boot/dts/msm8974-coresight.dtsi
+++ b/arch/arm/boot/dts/msm8974-coresight.dtsi
@@ -15,6 +15,7 @@
 		compatible = "arm,coresight-tmc";
 		reg = <0xfc322000 0x1000>,
 		      <0xfc37c000 0x3000>;
+		reg-names = "tmc-etr-base", "tmc-etr-bam-base";
 
 		qcom,memory-reservation-type = "EBI1";
 		qcom,memory-reservation-size = <0x100000>; /* 1M EBI1 buffer */
@@ -22,11 +23,13 @@
 		coresight-id = <0>;
 		coresight-name = "coresight-tmc-etr";
 		coresight-nr-inports = <1>;
+		coresight-ctis = <&cti0 &cti8>;
 	};
 
 	tpiu: tpiu@fc318000 {
 		compatible = "arm,coresight-tpiu";
 		reg = <0xfc318000 0x1000>;
+		reg-names = "tpiu-base";
 
 		coresight-id = <1>;
 		coresight-name = "coresight-tpiu";
@@ -36,6 +39,7 @@
 	replicator: replicator@fc31c000 {
 		compatible = "qcom,coresight-replicator";
 		reg = <0xfc31c000 0x1000>;
+		reg-names = "replicator-base";
 
 		coresight-id = <2>;
 		coresight-name = "coresight-replicator";
@@ -48,6 +52,7 @@
 	tmc_etf: tmc@fc307000 {
 		compatible = "arm,coresight-tmc";
 		reg = <0xfc307000 0x1000>;
+		reg-names = "tmc-etf-base";
 
 		coresight-id = <3>;
 		coresight-name = "coresight-tmc-etf";
@@ -56,11 +61,13 @@
 		coresight-child-list = <&replicator>;
 		coresight-child-ports = <0>;
 		coresight-default-sink;
+		coresight-ctis = <&cti0 &cti8>;
 	};
 
 	funnel_merg: funnel@fc31b000 {
 		compatible = "arm,coresight-funnel";
 		reg = <0xfc31b000 0x1000>;
+		reg-names = "funnel-merg-base";
 
 		coresight-id = <4>;
 		coresight-name = "coresight-funnel-merg";
@@ -73,6 +80,7 @@
 	funnel_in0: funnel@fc319000 {
 		compatible = "arm,coresight-funnel";
 		reg = <0xfc319000 0x1000>;
+		reg-names = "funnel-in0-base";
 
 		coresight-id = <5>;
 		coresight-name = "coresight-funnel-in0";
@@ -85,6 +93,7 @@
 	funnel_in1: funnel@fc31a000 {
 		compatible = "arm,coresight-funnel";
 		reg = <0xfc31a000 0x1000>;
+		reg-names = "funnel-in1-base";
 
 		coresight-id = <6>;
 		coresight-name = "coresight-funnel-in1";
@@ -97,6 +106,7 @@
 	funnel_kpss: funnel@fc345000 {
 		compatible = "arm,coresight-funnel";
 		reg = <0xfc345000 0x1000>;
+		reg-names = "funnel-kpss-base";
 
 		coresight-id = <7>;
 		coresight-name = "coresight-funnel-kpss";
@@ -109,6 +119,8 @@
 	funnel_mmss: funnel@fc364000 {
 		compatible = "arm,coresight-funnel";
 		reg = <0xfc364000 0x1000>;
+		reg-names = "funnel-mmss-base";
+
 
 		coresight-id = <8>;
 		coresight-name = "coresight-funnel-mmss";
@@ -122,6 +134,7 @@
 		compatible = "arm,coresight-stm";
 		reg = <0xfc321000 0x1000>,
 		      <0xfa280000 0x180000>;
+		reg-names = "stm-base", "stm-data-base";
 
 		coresight-id = <9>;
 		coresight-name = "coresight-stm";
@@ -134,6 +147,7 @@
 	etm0: etm@fc33c000 {
 		compatible = "arm,coresight-etm";
 		reg = <0xfc33c000 0x1000>;
+		reg-names = "etm0-base";
 
 		coresight-id = <10>;
 		coresight-name = "coresight-etm0";
@@ -149,6 +163,7 @@
 	etm1: etm@fc33d000 {
 		compatible = "arm,coresight-etm";
 		reg = <0xfc33d000 0x1000>;
+		reg-names = "etm1-base";
 
 		coresight-id = <11>;
 		coresight-name = "coresight-etm1";
@@ -164,6 +179,7 @@
 	etm2: etm@fc33e000 {
 		compatible = "arm,coresight-etm";
 		reg = <0xfc33e000 0x1000>;
+		reg-names = "etm2-base";
 
 		coresight-id = <12>;
 		coresight-name = "coresight-etm2";
@@ -179,6 +195,7 @@
 	etm3: etm@fc33f000 {
 		compatible = "arm,coresight-etm";
 		reg = <0xfc33f000 0x1000>;
+		reg-names = "etm3-base";
 
 		coresight-id = <13>;
 		coresight-name = "coresight-etm3";
@@ -194,6 +211,7 @@
 	csr: csr@fc302000 {
 		compatible = "qcom,coresight-csr";
 		reg = <0xfc302000 0x1000>;
+		reg-names = "csr-base";
 
 		coresight-id = <14>;
 		coresight-name = "coresight-csr";
@@ -201,4 +219,144 @@
 
 		qcom,blk-size = <3>;
 	};
+
+	cti0: cti@fc308000 {
+		compatible = "arm,coresight-cti";
+		reg = <0xfc308000 0x1000>;
+		reg-names = "cti0-base";
+
+		coresight-id = <15>;
+		coresight-name = "coresight-cti0";
+		coresight-nr-inports = <0>;
+	};
+
+	cti1: cti@fc309000 {
+		compatible = "arm,coresight-cti";
+		reg = <0xfc309000 0x1000>;
+		reg-names = "cti1-base";
+
+		coresight-id = <16>;
+		coresight-name = "coresight-cti1";
+		coresight-nr-inports = <0>;
+	};
+
+	cti2: cti@fc30a000 {
+		compatible = "arm,coresight-cti";
+		reg = <0xfc30a000 0x1000>;
+		reg-names = "cti2-base";
+
+		coresight-id = <17>;
+		coresight-name = "coresight-cti2";
+		coresight-nr-inports = <0>;
+	};
+
+	cti3: cti@fc30b000 {
+		compatible = "arm,coresight-cti";
+		reg = <0xfc30b000 0x1000>;
+		reg-names = "cti3-base";
+
+		coresight-id = <18>;
+		coresight-name = "coresight-cti3";
+		coresight-nr-inports = <0>;
+	};
+
+	cti4: cti@fc30c000 {
+		compatible = "arm,coresight-cti";
+		reg = <0xfc30c000 0x1000>;
+		reg-names = "cti4-base";
+
+		coresight-id = <19>;
+		coresight-name = "coresight-cti4";
+		coresight-nr-inports = <0>;
+	};
+
+	cti5: cti@fc30d000 {
+		compatible = "arm,coresight-cti";
+		reg = <0xfc30d000 0x1000>;
+		reg-names = "cti5-base";
+
+		coresight-id = <20>;
+		coresight-name = "coresight-cti5";
+		coresight-nr-inports = <0>;
+	};
+
+	cti6: cti@fc30e000 {
+		compatible = "arm,coresight-cti";
+		reg = <0xfc30e000 0x1000>;
+		reg-names = "cti6-base";
+
+		coresight-id = <21>;
+		coresight-name = "coresight-cti6";
+		coresight-nr-inports = <0>;
+	};
+
+	cti7: cti@fc30f000 {
+		compatible = "arm,coresight-cti";
+		reg = <0xfc30f000 0x1000>;
+		reg-names = "cti7-base";
+
+		coresight-id = <22>;
+		coresight-name = "coresight-cti7";
+		coresight-nr-inports = <0>;
+	};
+
+	cti8: cti@fc310000 {
+		compatible = "arm,coresight-cti";
+		reg = <0xfc310000 0x1000>;
+		reg-names = "cti8-base";
+
+		coresight-id = <23>;
+		coresight-name = "coresight-cti8";
+		coresight-nr-inports = <0>;
+	};
+
+	cti_l2: cti@fc340000 {
+		compatible = "arm,coresight-cti";
+		reg = <0xfc340000 0x1000>;
+		reg-names = "cti-l2-base";
+
+		coresight-id = <24>;
+		coresight-name = "coresight-cti-l2";
+		coresight-nr-inports = <0>;
+	};
+
+	cti_cpu0: cti@fc341000 {
+		compatible = "arm,coresight-cti";
+		reg = <0xfc341000 0x1000>;
+		reg-names = "cti-cpu0-base";
+
+		coresight-id = <25>;
+		coresight-name = "coresight-cti-cpu0";
+		coresight-nr-inports = <0>;
+	};
+
+	cti_cpu1: cti@fc342000 {
+		compatible = "arm,coresight-cti";
+		reg = <0xfc342000 0x1000>;
+		reg-names = "cti-cpu1-base";
+
+		coresight-id = <26>;
+		coresight-name = "coresight-cti-cpu1";
+		coresight-nr-inports = <0>;
+	};
+
+	cti_cpu2: cti@fc343000 {
+		compatible = "arm,coresight-cti";
+		reg = <0xfc343000 0x1000>;
+		reg-names = "cti-cpu2-base";
+
+		coresight-id = <27>;
+		coresight-name = "coresight-cti-cpu2";
+		coresight-nr-inports = <0>;
+	};
+
+	cti_cpu3: cti@fc344000 {
+		compatible = "arm,coresight-cti";
+		reg = <0xfc344000 0x1000>;
+		reg-names = "cti-cpu3-base";
+
+		coresight-id = <28>;
+		coresight-name = "coresight-cti-cpu3";
+		coresight-nr-inports = <0>;
+	};
 };
diff --git a/arch/arm/boot/dts/msm8974-fluid.dtsi b/arch/arm/boot/dts/msm8974-fluid.dtsi
index 11c835f..92a6e01 100644
--- a/arch/arm/boot/dts/msm8974-fluid.dtsi
+++ b/arch/arm/boot/dts/msm8974-fluid.dtsi
@@ -224,7 +224,7 @@
 				linux,default-trigger = "bkl-trigger";
 				qcom,cs-out-en;
 				qcom,op-fdbck;
-				qcom,default-state = "off";
+				qcom,default-state = "on";
 				qcom,max-current = <25>;
 				qcom,ctrl-delay-us = <0>;
 				qcom,boost-curr-lim = <3>;
diff --git a/arch/arm/boot/dts/msm8974-ion.dtsi b/arch/arm/boot/dts/msm8974-ion.dtsi
index f55cff2..dfa22c1 100644
--- a/arch/arm/boot/dts/msm8974-ion.dtsi
+++ b/arch/arm/boot/dts/msm8974-ion.dtsi
@@ -24,8 +24,7 @@
 			compatible = "qcom,msm-ion-reserve";
 			reg = <8>;
 			qcom,heap-align = <0x1000>;
-			qcom,memory-reservation-type = "EBI1"; /* reserve EBI memory */
-			qcom,memory-reservation-size = <0x7800000>;
+			linux,contiguous-region = <&secure_mem>;
 		};
 
 		qcom,ion-heap@25 { /* IOMMU HEAP */
diff --git a/arch/arm/boot/dts/msm8974-liquid.dtsi b/arch/arm/boot/dts/msm8974-liquid.dtsi
index e97678a..6e2719b 100644
--- a/arch/arm/boot/dts/msm8974-liquid.dtsi
+++ b/arch/arm/boot/dts/msm8974-liquid.dtsi
@@ -199,6 +199,90 @@
 					00 00
 					];
 			};
+			atmel,cfg_2 {
+				atmel,family-id = <0xa2>;
+				atmel,variant-id = <0x00>;
+				atmel,version = <0x11>;
+				atmel,build = <0xaa>;
+				atmel,config = [
+					/* Object 6, Instance = 0 */
+					00 00 00 00 00 00
+					/* Object 38, Instance = 0 */
+					19 01 00 0D 02 0D 00 00 00 00
+					00 00 00 00 00 00 00 00 00 00
+					00 00 00 00 00 00 00 00 00 00
+					00 00 00 00 00 00 00 00 00 00
+					00 00 00 00 00 00 00 00 00 00
+					00 00 00 00 00 00 00 00 00 00
+					00 00 00 00
+					/* Object 7, Instance = 0 */
+					20 08 32 C3
+					/* Object 8, Instance = 0 */
+					41 00 14 14 00 00 00 01 00 00
+					/* Object 9, Instance = 0 */
+					8F 00 00 20 34 00 87 4B 02 03
+					00 05 03 40 0A 14 14 0A 80 07
+					38 04 03 03 03 03 08 28 02 3C
+					0F 0F 2E 33 01 00
+					/* Object 15, Instance = 0 */
+					00 00 00 00 00 00 00 00 00 00
+					00
+					/* Object 18, Instance = 0 */
+					04 00
+					/* Object 24, Instance = 0 */
+					00 00 00 00 00 00 00 00 00 00
+					00 00 00 00 00 00 00 00 00
+					/* Object 25, Instance = 0 */
+					00 00 54 6F F0 55 00 00 00 00
+					00 00 00 00 00
+					/* Object 27, Instance = 0 */
+					00 00 00 00 00 00 00
+					/* Object 40, Instance = 0 */
+					00 14 14 14 14
+					/* Object 42, Instance = 0 */
+					23 32 14 14 80 00 0A 00 05 05
+					/* Object 43, Instance = 0 */
+					08 00 01 01 91 00 80 00 00 00
+					00 00
+					/* Object 46, Instance = 0 */
+					00 00 18 18 00 00 01 00 00 0F
+					0A
+					/* Object 47, Instance = 0 */
+					00 14 28 02 05 28 01 78 03 10
+					00 00 0C 00 00 00 00 00 00 00
+					00 00
+					/* Object 55, Instance = 0 */
+					00 00 00 00 00 00 00
+					/* Object 56, Instance = 0 */
+					01 00 00 30 13 14 14 14 15 15
+					15 15 15 15 15 16 16 16 16 16
+					16 16 16 16 16 15 14 14 14 14
+					15 14 14 14 14 13 03 20 03 01
+					0A 04 00 00 00 00 00 00 00 00
+					1A
+					/* Object 57, Instance = 0 */
+					00 00 00
+					/* Object 61, Instance = 0 */
+					00 00 00 00 00
+					/* Object 62, Instance = 0 */
+					00 03 00 07 02 00 00 00 00 00
+					0F 17 23 2D 05 00 05 03 03 69
+					14 14 34 11 64 06 06 04 40 00
+					00 00 00 00 69 3C 02 04 01 00
+					0A 14 14 03 03 03 03 00 00 00
+					00 64 1E 01 00 00 00 00 00 00
+					00 00 00 00 00 00 00 00 00 00
+					00 00 00 00
+					/* Object 63, Instance = 0 */
+					00 00 00 00 00 00 00 00 00 00
+					00 00
+					/* Object 65, Instance = 0 */
+					00 00 00 00 00 00 00 00 00 00
+					00 00 00 00 00 00 00
+					/* Object 66, Instance = 0 */
+					00 00 00 00 00
+					];
+			};
 		};
 	};
 
@@ -210,6 +294,11 @@
 		enable-active-high;
 	};
 
+	bt_ar3002 {
+		compatible = "qca,ar3002";
+		qca,bt-reset-gpio = <&pm8941_gpios 34 0>;
+	};
+
 	sound {
 		qcom,model = "msm8974-taiko-liquid-snd-card";
 
@@ -291,7 +380,7 @@
 };
 
 &usb3 {
-	qcom,charging-disabled;
+	qcom,otg-capability;
 };
 
 &pm8941_mvs1 {
@@ -619,3 +708,33 @@
 		};
 	};
 };
+
+&pm8941_chg {
+	status = "ok";
+
+	qcom,chg-charging-disabled;
+
+	qcom,chg-chgr@1000 {
+		status = "ok";
+	};
+
+	qcom,chg-buck@1100 {
+		status = "ok";
+	};
+
+	qcom,chg-usb-chgpth@1300 {
+		status = "ok";
+	};
+
+	qcom,chg-dc-chgpth@1400 {
+		status = "ok";
+	};
+
+	qcom,chg-boost@1500 {
+		status = "ok";
+	};
+
+	qcom,chg-misc@1600 {
+		status = "ok";
+	};
+};
diff --git a/arch/arm/boot/dts/msm8974-mtp.dtsi b/arch/arm/boot/dts/msm8974-mtp.dtsi
index 50fd6ff..c6935f4 100644
--- a/arch/arm/boot/dts/msm8974-mtp.dtsi
+++ b/arch/arm/boot/dts/msm8974-mtp.dtsi
@@ -199,7 +199,7 @@
 				linux,default-trigger = "bkl-trigger";
 				qcom,cs-out-en;
 				qcom,op-fdbck;
-				qcom,default-state = "off";
+				qcom,default-state = "on";
 				qcom,max-current = <25>;
 				qcom,ctrl-delay-us = <0>;
 				qcom,boost-curr-lim = <3>;
diff --git a/arch/arm/boot/dts/msm8974-v1-pm.dtsi b/arch/arm/boot/dts/msm8974-pm.dtsi
similarity index 100%
rename from arch/arm/boot/dts/msm8974-v1-pm.dtsi
rename to arch/arm/boot/dts/msm8974-pm.dtsi
diff --git a/arch/arm/boot/dts/msm8974-regulator.dtsi b/arch/arm/boot/dts/msm8974-regulator.dtsi
index 1a6d9ba..2dad8e7 100644
--- a/arch/arm/boot/dts/msm8974-regulator.dtsi
+++ b/arch/arm/boot/dts/msm8974-regulator.dtsi
@@ -423,68 +423,75 @@
 };
 
 / {
-	krait0_vreg: regulator@f9088000 {
-		compatible = "qcom,krait-regulator";
-		regulator-name = "krait0";
-		reg = <0xf9088000 0x1000>, /* APCS_ALIAS0_KPSS_ACS */
-			<0xf908a800 0x1000>; /* APCS_ALIAS0_KPSS_MDD */
-		reg-names = "acs", "mdd";
-		regulator-min-microvolt = <500000>;
-		regulator-max-microvolt = <1100000>;
-		qcom,headroom-voltage = <150000>;
-		qcom,retention-voltage = <675000>;
-		qcom,ldo-default-voltage = <750000>;
-		qcom,ldo-threshold-voltage = <850000>;
-		qcom,ldo-delta-voltage = <50000>;
-		qcom,cpu-num = <0>;
-	};
+	krait_pdn: krait-pdn {
+		compatible = "qcom,krait-pdn";
+		#address-cells = <1>;
+		#size-cells = <1>;
+		ranges;
 
-	krait1_vreg: regulator@f9098000 {
-		compatible = "qcom,krait-regulator";
-		regulator-name = "krait1";
-		reg = <0xf9098000 0x1000>, /* APCS_ALIAS1_KPSS_ACS */
-			<0xf909a800 0x1000>; /* APCS_ALIAS1_KPSS_MDD */
-		reg-names = "acs", "mdd";
-		regulator-min-microvolt = <500000>;
-		regulator-max-microvolt = <1100000>;
-		qcom,headroom-voltage = <150000>;
-		qcom,retention-voltage = <675000>;
-		qcom,ldo-default-voltage = <750000>;
-		qcom,ldo-threshold-voltage = <850000>;
-		qcom,ldo-delta-voltage = <50000>;
-		qcom,cpu-num = <1>;
-	};
+		krait0_vreg: regulator@f9088000 {
+			compatible = "qcom,krait-regulator";
+			regulator-name = "krait0";
+			reg = <0xf9088000 0x1000>, /* APCS_ALIAS0_KPSS_ACS */
+				<0xf908a800 0x1000>; /* APCS_ALIAS0_KPSS_MDD */
+			reg-names = "acs", "mdd";
+			regulator-min-microvolt = <500000>;
+			regulator-max-microvolt = <1100000>;
+			qcom,headroom-voltage = <150000>;
+			qcom,retention-voltage = <675000>;
+			qcom,ldo-default-voltage = <750000>;
+			qcom,ldo-threshold-voltage = <850000>;
+			qcom,ldo-delta-voltage = <50000>;
+			qcom,cpu-num = <0>;
+		};
 
-	krait2_vreg: regulator@f90a8000 {
-		compatible = "qcom,krait-regulator";
-		regulator-name = "krait2";
-		reg = <0xf90a8000 0x1000>, /* APCS_ALIAS2_KPSS_ACS */
-			<0xf90aa800 0x1000>; /* APCS_ALIAS2_KPSS_MDD */
-		reg-names = "acs", "mdd";
-		regulator-min-microvolt = <500000>;
-		regulator-max-microvolt = <1100000>;
-		qcom,headroom-voltage = <150000>;
-		qcom,retention-voltage = <675000>;
-		qcom,ldo-default-voltage = <750000>;
-		qcom,ldo-threshold-voltage = <850000>;
-		qcom,ldo-delta-voltage = <50000>;
-		qcom,cpu-num = <2>;
-	};
+		krait1_vreg: regulator@f9098000 {
+			compatible = "qcom,krait-regulator";
+			regulator-name = "krait1";
+			reg = <0xf9098000 0x1000>, /* APCS_ALIAS1_KPSS_ACS */
+				<0xf909a800 0x1000>; /* APCS_ALIAS1_KPSS_MDD */
+			reg-names = "acs", "mdd";
+			regulator-min-microvolt = <500000>;
+			regulator-max-microvolt = <1100000>;
+			qcom,headroom-voltage = <150000>;
+			qcom,retention-voltage = <675000>;
+			qcom,ldo-default-voltage = <750000>;
+			qcom,ldo-threshold-voltage = <850000>;
+			qcom,ldo-delta-voltage = <50000>;
+			qcom,cpu-num = <1>;
+		};
 
-	krait3_vreg: regulator@f90b8000 {
-		compatible = "qcom,krait-regulator";
-		regulator-name = "krait3";
-		reg = <0xf90b8000 0x1000>, /* APCS_ALIAS3_KPSS_ACS */
-			<0xf90ba800 0x1000>; /* APCS_ALIAS3_KPSS_MDD */
-		reg-names = "acs", "mdd";
-		regulator-min-microvolt = <500000>;
-		regulator-max-microvolt = <1100000>;
-		qcom,headroom-voltage = <150000>;
-		qcom,retention-voltage = <675000>;
-		qcom,ldo-default-voltage = <750000>;
-		qcom,ldo-threshold-voltage = <850000>;
-		qcom,ldo-delta-voltage = <50000>;
-		qcom,cpu-num = <3>;
+		krait2_vreg: regulator@f90a8000 {
+			compatible = "qcom,krait-regulator";
+			regulator-name = "krait2";
+			reg = <0xf90a8000 0x1000>, /* APCS_ALIAS2_KPSS_ACS */
+				<0xf90aa800 0x1000>; /* APCS_ALIAS2_KPSS_MDD */
+			reg-names = "acs", "mdd";
+			regulator-min-microvolt = <500000>;
+			regulator-max-microvolt = <1100000>;
+			qcom,headroom-voltage = <150000>;
+			qcom,retention-voltage = <675000>;
+			qcom,ldo-default-voltage = <750000>;
+			qcom,ldo-threshold-voltage = <850000>;
+			qcom,ldo-delta-voltage = <50000>;
+			qcom,cpu-num = <2>;
+		};
+
+		krait3_vreg: regulator@f90b8000 {
+			compatible = "qcom,krait-regulator";
+			regulator-name = "krait3";
+			reg = <0xf90b8000 0x1000>, /* APCS_ALIAS3_KPSS_ACS */
+				<0xf90ba800 0x1000>; /* APCS_ALIAS3_KPSS_MDD */
+			reg-names = "acs", "mdd";
+			regulator-min-microvolt = <500000>;
+			regulator-max-microvolt = <1100000>;
+			qcom,headroom-voltage = <150000>;
+			qcom,retention-voltage = <675000>;
+			qcom,ldo-default-voltage = <750000>;
+			qcom,ldo-threshold-voltage = <850000>;
+			qcom,ldo-delta-voltage = <50000>;
+			qcom,cpu-num = <3>;
+		};
 	};
 
 	spi_eth_vreg: spi_eth_phy_vreg {
diff --git a/arch/arm/boot/dts/msm8974-v1-cdp.dts b/arch/arm/boot/dts/msm8974-v1-cdp.dts
index 33bd1fb..8db99b2 100644
--- a/arch/arm/boot/dts/msm8974-v1-cdp.dts
+++ b/arch/arm/boot/dts/msm8974-v1-cdp.dts
@@ -19,6 +19,10 @@
 	model = "Qualcomm MSM 8974 CDP";
 	compatible = "qcom,msm8974-cdp", "qcom,msm8974";
 	qcom,msm-id = <126 1 0>;
+
+	qcom,mdss_dsi_toshiba_720p_video {
+		qcom,cont-splash-enabled;
+	};
 };
 
 &ehci {
diff --git a/arch/arm/boot/dts/msm8974-v1-fluid.dts b/arch/arm/boot/dts/msm8974-v1-fluid.dts
index 0b435a3..60f2c4b 100644
--- a/arch/arm/boot/dts/msm8974-v1-fluid.dts
+++ b/arch/arm/boot/dts/msm8974-v1-fluid.dts
@@ -19,4 +19,8 @@
 	model = "Qualcomm MSM 8974 FLUID";
 	compatible = "qcom,msm8974-fluid", "qcom,msm8974";
 	qcom,msm-id = <126 3 0>;
+
+	qcom,mdss_dsi_toshiba_720p_video {
+		qcom,cont-splash-enabled;
+	};
 };
diff --git a/arch/arm/boot/dts/msm8974-v1-iommu-domains.dtsi b/arch/arm/boot/dts/msm8974-v1-iommu-domains.dtsi
new file mode 100644
index 0000000..6ea5b9e
--- /dev/null
+++ b/arch/arm/boot/dts/msm8974-v1-iommu-domains.dtsi
@@ -0,0 +1,31 @@
+/* Copyright (c) 2013, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+/ {
+	qcom,iommu-domains {
+		compatible = "qcom,iommu-domains";
+
+		venus_domain_ns: qcom,iommu-domain1 {
+			label = "venus_ns";
+			qcom,iommu-contexts = <&venus_ns>;
+			qcom,virtual-addr-pool = <0x40000000 0x3f000000
+						  0x7f000000 0x1000000>;
+		};
+
+		venus_domain_cp: qcom,iommu-domain2 {
+			label = "venus_cp";
+			qcom,iommu-contexts = <&venus_cp>;
+			qcom,virtual-addr-pool = <0x1000000 0x3f000000>;
+			qcom,secure-domain;
+		};
+	};
+};
diff --git a/arch/arm/boot/dts/msm8974-v1-mtp.dts b/arch/arm/boot/dts/msm8974-v1-mtp.dts
index 01e9fe2..2d52f78 100644
--- a/arch/arm/boot/dts/msm8974-v1-mtp.dts
+++ b/arch/arm/boot/dts/msm8974-v1-mtp.dts
@@ -19,4 +19,8 @@
 	model = "Qualcomm MSM 8974 MTP";
 	compatible = "qcom,msm8974-mtp", "qcom,msm8974";
 	qcom,msm-id = <126 8 0>;
+
+	qcom,mdss_dsi_toshiba_720p_video {
+		qcom,cont-splash-enabled;
+	};
 };
diff --git a/arch/arm/boot/dts/msm8974-v1.dtsi b/arch/arm/boot/dts/msm8974-v1.dtsi
index b85c7a5..64014b3 100644
--- a/arch/arm/boot/dts/msm8974-v1.dtsi
+++ b/arch/arm/boot/dts/msm8974-v1.dtsi
@@ -18,7 +18,7 @@
 
 /include/ "msm8974.dtsi"
 /include/ "msm8974-v1-iommu.dtsi"
-/include/ "msm8974-v1-pm.dtsi"
+/include/ "msm8974-v1-iommu-domains.dtsi"
 
 / {
 	android_usb@fc42b0c8 {
@@ -45,3 +45,81 @@
 &i2c_2 {
 	qcom,i2c-src-freq = <19200000>;
 };
+
+/* CoreSight */
+&tmc_etr {
+	qcom,reset-flush-race;
+};
+
+&stm {
+	qcom,write-64bit;
+};
+
+&msm_vidc {
+	qcom,vidc-cp-map = <0x1000000 0x3f000000>;
+	qcom,vidc-ns-map = <0x40000000 0x40000000>;
+	qcom,load-freq-tbl = <979200 410000000>,
+		<783360 410000000>,
+		<489600 266670000>,
+		<244800 133330000>;
+	qcom,reg-presets = <0x80004 0x1>,
+		<0x80178 0x00001FFF>,
+		<0x8017c 0x1FFF1FFF>,
+		<0x800b0 0x10101001>,
+		<0x800b4 0x10101010>,
+		<0x800b8 0x10101010>,
+		<0x800bc 0x00000010>,
+		<0x800c0 0x1010100f>,
+		<0x800c4 0x10101010>,
+		<0x800c8 0x10101010>,
+		<0x800cc 0x00000010>,
+		<0x800d0 0x00001010>,
+		<0x800d4 0x00001010>,
+		<0x800f0 0x00000030>,
+		<0x800d8 0x00000707>,
+		<0x800dc 0x00000707>,
+		<0x80124 0x00000001>,
+		<0xE0020 0x5555556>,
+		<0xE0024 0x0>;
+	qcom,bus-ports = <1>;
+	qcom,enc-ocmem-ab-ib = <0 0>,
+		<138200 1222000>,
+		<414700 1222000>,
+		<940000 2444000>,
+		<1880000 2444000>,
+		<3008000 3910400>,
+		<3760000 4888000>;
+	qcom,dec-ocmem-ab-ib = <0 0>,
+		<176900 1556640>,
+		<456200 1556640>,
+		<864800 1556640>,
+		<1729600 3113280>,
+		<2767360 4981248>,
+		<3459200 6226560>;
+	qcom,enc-ddr-ab-ib = <0 0>,
+		<60000 664950>,
+		<181000 664950>,
+		<403000 664950>,
+		<806000 1329900>,
+		<1289600 2127840>,
+		<161200 6400000>;
+	qcom,dec-ddr-ab-ib = <0 0>,
+		<110000 909000>,
+		<268000 909000>,
+		<505000 909000>,
+		<1010000 1818000>,
+		<1616000 2908800>,
+		<2020000 6400000>;
+	qcom,iommu-groups = <&venus_domain_ns &venus_domain_cp>;
+	qcom,iommu-group-buffer-types = <0xfff 0x1ff>;
+	qcom,buffer-type-tz-usage-table = <0x1 0x1>,
+					<0x1fe 0x2>;
+};
+
+&sfpb_spinlock {
+	status = "disable";
+};
+
+&ldrex_spinlock {
+	status = "ok";
+};
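
The two spinlock overrides above pair with the sfpb_spinlock and ldrex_spinlock nodes this patch adds to msm8974.dtsi further down: the base file ships with the SFPB implementation enabled and the LDREX one disabled, and v1 flips both statuses. A sketch of the node v1 ends up probing (the resolved result of the base node plus this override, not a literal hunk from the patch):

	qcom,ipc-spinlock@fa00000 {
		compatible = "qcom,ipc-spinlock-ldrex";
		reg = <0xfa00000 0x200000>;
		status = "ok";		/* "disable" in msm8974.dtsi, overridden here */
	};
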
diff --git a/arch/arm/boot/dts/msm8974-v2-iommu-domains.dtsi b/arch/arm/boot/dts/msm8974-v2-iommu-domains.dtsi
new file mode 100644
index 0000000..a83815e
--- /dev/null
+++ b/arch/arm/boot/dts/msm8974-v2-iommu-domains.dtsi
@@ -0,0 +1,45 @@
+/* Copyright (c) 2013, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+/ {
+	qcom,iommu-domains {
+		compatible = "qcom,iommu-domains";
+
+		venus_domain_ns: qcom,iommu-domain1 {
+			label = "venus_ns";
+			qcom,iommu-contexts = <&venus_ns>;
+			qcom,virtual-addr-pool = <0x5dc00000 0x7f000000
+						 0xdcc00000 0x1000000>;
+		};
+
+		venus_domain_sec_bitstream: qcom,iommu-domain2 {
+			label = "venus_sec_bitstream";
+			qcom,iommu-contexts = <&venus_sec_bitstream>;
+			qcom,virtual-addr-pool = <0x4b000000 0x12c00000>;
+			qcom,secure-domain;
+		};
+
+		venus_domain_sec_pixel: qcom,iommu-domain3 {
+			label = "venus_sec_pixel";
+			qcom,iommu-contexts = <&venus_sec_pixel>;
+			qcom,virtual-addr-pool = <0x25800000 0x25800000>;
+			qcom,secure-domain;
+		};
+
+		venus_domain_sec_non_pixel: qcom,iommu-domain4 {
+			label = "venus_sec_non_pixel";
+			qcom,iommu-contexts = <&venus_sec_non_pixel>;
+			qcom,virtual-addr-pool = <0x1000000 0x24800000>;
+			qcom,secure-domain;
+		};
+	};
+};
diff --git a/arch/arm/boot/dts/msm8974-v2-pm.dtsi b/arch/arm/boot/dts/msm8974-v2-pm.dtsi
deleted file mode 100644
index 0ed55ff..0000000
--- a/arch/arm/boot/dts/msm8974-v2-pm.dtsi
+++ /dev/null
@@ -1,426 +0,0 @@
-/* Copyright (c) 2013, The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- */
-
-/include/ "skeleton.dtsi"
-
-/ {
-	qcom,spm@f9089000 {
-		compatible = "qcom,spm-v2";
-		#address-cells = <1>;
-		#size-cells = <1>;
-		reg = <0xf9089000 0x1000>;
-		qcom,core-id = <0>;
-		qcom,saw2-ver-reg = <0xfd0>;
-		qcom,saw2-cfg = <0x01>;
-		qcom,saw2-avs-ctl = <0>;
-		qcom,saw2-avs-hysteresis = <0>;
-		qcom,saw2-avs-limit = <0>;
-		qcom,saw2-avs-dly= <0>;
-		qcom,saw2-spm-dly= <0x20000400>;
-		qcom,saw2-spm-ctl = <0x1>;
-		qcom,saw2-spm-cmd-wfi = [03 0b 0f];
-		qcom,saw2-spm-cmd-ret = [42 1b 00 d0 03 d4 5b 0b 00 42 1b 0f];
-		qcom,saw2-spm-cmd-spc = [00 20 50 80 60 70 10 E0 03 6E 70 3B
-				E4 5B 82 2B 50 10 0B 30 06 26 30 0F];
-		qcom,saw2-spm-cmd-pc = [00 20 50 80 60 70 10 E0 07 6E 70 3B
-				E4 5B 82 2B 50 10 0B 30 06 26 30 0F];
-	};
-
-	qcom,spm@f9099000 {
-		compatible = "qcom,spm-v2";
-		#address-cells = <1>;
-		#size-cells = <1>;
-		reg = <0xf9099000 0x1000>;
-		qcom,core-id = <1>;
-		qcom,saw2-ver-reg = <0xfd0>;
-		qcom,saw2-cfg = <0x01>;
-		qcom,saw2-avs-ctl = <0>;
-		qcom,saw2-avs-hysteresis = <0>;
-		qcom,saw2-avs-limit = <0>;
-		qcom,saw2-avs-dly= <0>;
-		qcom,saw2-spm-dly= <0x20000400>;
-		qcom,saw2-spm-ctl = <0x1>;
-		qcom,saw2-spm-cmd-wfi = [03 0b 0f];
-		qcom,saw2-spm-cmd-ret = [42 1b 00 d0 03 d4 5b 0b 00 42 1b 0f];
-		qcom,saw2-spm-cmd-spc = [00 20 50 80 60 70 10 E0 03 6E 70 3B
-				E4 5B 82 2B 50 10 0B 30 06 26 30 0F];
-		qcom,saw2-spm-cmd-pc = [00 20 50 80 60 70 10 E0 07 6E 70 3B
-				E4 5B 82 2B 50 10 0B 30 06 26 30 0F];
-	};
-
-	qcom,spm@f90a9000 {
-		compatible = "qcom,spm-v2";
-		#address-cells = <1>;
-		#size-cells = <1>;
-		reg = <0xf90a9000 0x1000>;
-		qcom,core-id = <2>;
-		qcom,saw2-ver-reg = <0xfd0>;
-		qcom,saw2-cfg = <0x01>;
-		qcom,saw2-avs-ctl = <0>;
-		qcom,saw2-avs-hysteresis = <0>;
-		qcom,saw2-avs-limit = <0>;
-		qcom,saw2-avs-dly= <0>;
-		qcom,saw2-spm-dly= <0x20000400>;
-		qcom,saw2-spm-ctl = <0x1>;
-		qcom,saw2-spm-cmd-wfi = [03 0b 0f];
-		qcom,saw2-spm-cmd-ret = [42 1b 00 d0 03 d4 5b 0b 00 42 1b 0f];
-		qcom,saw2-spm-cmd-spc = [00 20 50 80 60 70 10 E0 03 6E 70 3B
-				E4 5B 82 2B 50 10 0B 30 06 26 30 0F];
-		qcom,saw2-spm-cmd-pc = [00 20 50 80 60 70 10 E0 07 6E 70 3B
-				E4 5B 82 2B 50 10 0B 30 06 26 30 0F];
-	};
-
-	qcom,spm@f90b9000 {
-		compatible = "qcom,spm-v2";
-		#address-cells = <1>;
-		#size-cells = <1>;
-		reg = <0xf90b9000 0x1000>;
-		qcom,core-id = <3>;
-		qcom,saw2-ver-reg = <0xfd0>;
-		qcom,saw2-cfg = <0x01>;
-		qcom,saw2-avs-ctl = <0>;
-		qcom,saw2-avs-hysteresis = <0>;
-		qcom,saw2-avs-limit = <0>;
-		qcom,saw2-avs-dly= <0>;
-		qcom,saw2-spm-dly= <0x20000400>;
-		qcom,saw2-spm-ctl = <0x1>;
-		qcom,saw2-spm-cmd-wfi = [03 0b 0f];
-		qcom,saw2-spm-cmd-ret = [42 1b 00 d0 03 d4 5b 0b 00 42 1b 0f];
-		qcom,saw2-spm-cmd-spc = [00 20 50 80 60 70 10 E0 03 6E 70 3B
-				E4 5B 82 2B 50 10 0B 30 06 26 30 0F];
-		qcom,saw2-spm-cmd-pc = [00 20 50 80 60 70 10 E0 07 6E 70 3B
-				E4 5B 82 2B 50 10 0B 30 06 26 30 0F];
-	};
-
-	qcom,spm@f9012000 {
-		compatible = "qcom,spm-v2";
-		#address-cells = <1>;
-		#size-cells = <1>;
-		reg = <0xf9012000 0x1000>;
-		qcom,core-id = <0xffff>; /* L2/APCS SAW */
-		qcom,saw2-ver-reg = <0xfd0>;
-		qcom,saw2-cfg = <0x14>;
-		qcom,saw2-avs-ctl = <0>;
-		qcom,saw2-avs-hysteresis = <0>;
-		qcom,saw2-avs-limit = <0>;
-		qcom,saw2-avs-dly= <0>;
-		qcom,saw2-spm-dly= <0x20000400>;
-		qcom,saw2-spm-ctl = <0x1>;
-		qcom,saw2-pmic-data0 = <0x02030080>;
-		qcom,saw2-pmic-data1 = <0x00030000>;
-		qcom,vctl-timeout-us = <50>;
-		qcom,vctl-port = <0x0>;
-		qcom,phase-port = <0x1>;
-		qcom,pfm-port = <0x2>;
-		qcom,saw2-spm-cmd-ret = [1f 00 20 03 22 00 0f];
-		qcom,saw2-spm-cmd-gdhs = [00 20 32 60 70 80 42 07 78 80 44 22 50
-				3b 60 02 32 50 0f];
-		qcom,saw2-spm-cmd-pc = [00 10 32 60 70 80 b0 11 42 07 01 b0 78
-				80 12 44 50 3b 60 02 32 50 0f];
-	};
-
-	qcom,lpm-resources {
-		compatible = "qcom,lpm-resources";
-		#address-cells = <1>;
-		#size-cells = <0>;
-
-		qcom,lpm-resources@0 {
-			reg = <0x0>;
-			qcom,name = "vdd-dig";
-			qcom,resource-type = <0>;
-			qcom,type = <0x62706d73>;	/* "smpb" */
-			qcom,id = <0x02>;
-			qcom,key = <0x6e726f63>;	/* "corn" */
-			qcom,init-value = <5>;		/* Super Turbo */
-		};
-
-		qcom,lpm-resources@1 {
-			reg = <0x1>;
-			qcom,name = "vdd-mem";
-			qcom,resource-type = <0>;
-			qcom,type = <0x62706d73>;	/* "smpb" */
-			qcom,id = <0x01>;
-			qcom,key = <0x7675>;		/* "uv" */
-			qcom,init-value = <1050000>;	/* Super Turbo */
-		};
-
-		qcom,lpm-resources@2 {
-			reg = <0x2>;
-			qcom,name = "pxo";
-			qcom,resource-type = <0>;
-			qcom,type = <0x306b6c63>;	/* "clk0" */
-			qcom,id = <0x00>;
-			qcom,key = <0x62616e45>;	/* "Enab" */
-			qcom,init-value = <1>;		/* On */
-		};
-
-		qcom,lpm-resources@3 {
-			reg = <0x3>;
-			qcom,name = "l2";
-			qcom,resource-type = <1>;
-			qcom,init-value = <2>;		/* Retention */
-		};
-	};
-
-	qcom,lpm-levels {
-		compatible = "qcom,lpm-levels";
-		#address-cells = <1>;
-		#size-cells = <0>;
-
-		qcom,lpm-level@0 {
-			reg = <0x0>;
-			qcom,mode = <0>;        /* MSM_PM_SLEEP_MODE_WAIT_FOR_INTERRUPT */
-			qcom,xo = <1>;          /* ON */
-			qcom,l2 = <2>;          /* Retention */
-			qcom,vdd-mem-upper-bound = <1050000>; /* SUPER TURBO */
-			qcom,vdd-mem-lower-bound = <950000>; /* NORMAL */
-			qcom,vdd-dig-upper-bound = <6>; /* SUPER TURBO */
-			qcom,vdd-dig-lower-bound = <4>;  /* NORMAL */
-			qcom,irqs-detectable;
-			qcom.gpios-detectable;
-			qcom,latency-us = <1>;
-			qcom,ss-power = <784>;
-			qcom,energy-overhead = <190000>;
-			qcom,time-overhead = <100>;
-		};
-
-		qcom,lpm-level@1 {
-			reg = <0x1>;
-			qcom,mode = <4>;        /* MSM_PM_SLEEP_MODE_RETENTION*/
-			qcom,xo = <1>;          /* ON */
-			qcom,l2 = <2>;          /* Retention */
-			qcom,vdd-mem-upper-bound = <1050000>; /* SUPER TURBO */
-			qcom,vdd-mem-lower-bound = <950000>; /* NORMAL */
-			qcom,vdd-dig-upper-bound = <6>; /* SUPER TURBO */
-			qcom,vdd-dig-lower-bound = <4>;  /* NORMAL */
-			qcom,irqs-detectable;
-			qcom.gpios-detectable;
-			qcom,latency-us = <75>;
-			qcom,ss-power = <735>;
-			qcom,energy-overhead = <77341>;
-			qcom,time-overhead = <105>;
-		};
-
-		qcom,lpm-level@2 {
-			reg = <0x2>;
-			qcom,mode = <2>;        /* MSM_PM_SLEEP_MODE_STANDALONE_POWER_COLLAPSE */
-			qcom,xo = <1>;          /* ON */
-			qcom,l2 = <2>;          /* Retention */
-			qcom,vdd-mem-upper-bound = <1050000>; /* SUPER TURBO */
-			qcom,vdd-mem-lower-bound = <950000>; /* NORMAL */
-			qcom,vdd-dig-upper-bound = <6>; /* SUPER TURBO */
-			qcom,vdd-dig-lower-bound = <4>;  /* NORMAL */
-			qcom,irqs-detectable;
-			qcom.gpios-detectable;
-			qcom,latency-us = <95>;
-			qcom,ss-power = <725>;
-			qcom,energy-overhead = <99500>;
-			qcom,time-overhead = <130>;
-		};
-
-		qcom,lpm-level@3 {
-			reg = <0x3>;
-			qcom,mode = <3>;        /* MSM_PM_SLEEP_MODE_POWER_COLLAPSE */
-			qcom,xo = <1>;          /* ON */
-			qcom,l2 = <1>;          /* GDHS */
-			qcom,vdd-mem-upper-bound = <1050000>; /* SUPER TURBO */
-			qcom,vdd-mem-lower-bound = <950000>; /* NORMAL */
-			qcom,vdd-dig-upper-bound = <6>; /* SUPER TURBO */
-			qcom,vdd-dig-lower-bound = <4>;  /* NORMAL */
-			qcom,irqs-detectable;
-			qcom.gpios-detectable;
-			qcom,latency-us = <2000>;
-			qcom,ss-power = <138>;
-			qcom,energy-overhead = <1208400>;
-			qcom,time-overhead = <3200>;
-		};
-
-		qcom,lpm-level@4 {
-			reg = <0x4>;
-			qcom,mode = <3>;        /* MSM_PM_SLEEP_MODE_POWER_COLLAPSE */
-			qcom,xo = <1>;          /* ON */
-			qcom,l2 = <1>;          /* GDHS */
-			qcom,vdd-mem-upper-bound = <1050000>; /* SUPER TURBO */
-			qcom,vdd-mem-lower-bound = <950000>; /* SVS SOC */
-			qcom,vdd-dig-upper-bound = <6>; /* SUPER TURBO  */
-			qcom,vdd-dig-lower-bound = <3>;  /* SVS SOC */
-			qcom,irqs-detectable;
-			qcom.gpios-detectable;
-			qcom,latency-us = <3000>;
-			qcom,ss-power = <110>;
-			qcom,energy-overhead = <1250300>;
-			qcom,time-overhead = <3500>;
-		};
-
-		qcom,lpm-level@5 {
-			reg = <0x5>;
-			qcom,mode = <3>;        /* MSM_PM_SLEEP_MODE_POWER_COLLAPSE */
-			qcom,xo = <0>;          /* OFF */
-			qcom,l2 = <1>;          /* GDHS */
-			qcom,vdd-mem-upper-bound = <1050000>; /* SUPER TURBO */
-			qcom,vdd-mem-lower-bound = <950000>; /* NORMAL */
-			qcom,vdd-dig-upper-bound = <6>; /* SUPER TURBO */
-			qcom,vdd-dig-lower-bound = <4>;  /* NORMAL */
-			qcom,latency-us = <3000>;
-			qcom,ss-power = <68>;
-			qcom,energy-overhead = <1350200>;
-			qcom,time-overhead = <4000>;
-		};
-
-		qcom,lpm-level@6 {
-			reg = <0x6>;
-			qcom,mode= <3>;         /* MSM_PM_SLEEP_MODE_POWER_COLLAPSE */
-			qcom,xo = <0>;          /* OFF */
-			qcom,l2 = <1>;          /* GDHS */
-			qcom,vdd-mem-upper-bound = <950000>; /* NORMAL */
-			qcom,vdd-mem-lower-bound = <950000>;  /* SVS SOC */
-			qcom,vdd-dig-upper-bound = <4>;  /* NORMAL */
-			qcom,vdd-dig-lower-bound = <3>;  /* SVS SOC */
-			qcom,latency-us = <18000>;
-			qcom,ss-power = <10>;
-			qcom,energy-overhead = <3202600>;
-			qcom,time-overhead = <27000>;
-		};
-
-		qcom,lpm-level@7 {
-			reg = <0x7>;
-			qcom,mode= <3>;         /* MSM_PM_SLEEP_MODE_POWER_COLLAPSE */
-			qcom,xo = <0>;          /* OFF */
-			qcom,l2 = <0>;          /* OFF */
-			qcom,vdd-mem-upper-bound = <950000>; /* SVS SOC */
-			qcom,vdd-mem-lower-bound = <675000>; /* RETENTION */
-			qcom,vdd-dig-upper-bound = <3>; /* SVS SOC */
-			qcom,vdd-dig-lower-bound = <1>; /* RETENTION */
-			qcom,latency-us = <20000>;
-			qcom,ss-power = <2>;
-			qcom,energy-overhead = <4252000>;
-			qcom,time-overhead = <32000>;
-		};
-	};
-
-	qcom,pm-boot {
-		compatible = "qcom,pm-boot";
-		qcom,mode = <0>; /* MSM_PM_BOOT_CONFIG_TZ */
-	};
-
-	qcom,mpm@fc4281d0 {
-		compatible = "qcom,mpm-v2";
-		reg = <0xfc4281d0 0x1000>, /* MSM_RPM_MPM_BASE 4K */
-		    <0xf9011008 0x4>;   /* MSM_APCS_GCC_BASE 4K */
-		reg-names = "vmpm", "ipc";
-		interrupts = <0 171 1>;
-
-		qcom,ipc-bit-offset = <1>;
-
-		qcom,gic-parent = <&intc>;
-		qcom,gic-map = <47 172>, /* usb2_hsic_async_wakeup_irq */
-			<53 104>, /* mdss_irq */
-			<62 222>, /* ee0_krait_hlos_spmi_periph_irq */
-			<0xff 57>,  /* mss_to_apps_irq(0) */
-			<0xff 58>,  /* mss_to_apps_irq(1) */
-			<0xff 59>,  /* mss_to_apps_irq(2) */
-			<0xff 60>,  /* mss_to_apps_irq(3) */
-			<0xff 173>, /* o_wcss_apss_smd_hi */
-			<0xff 174>, /* o_wcss_apss_smd_med */
-			<0xff 175>, /* o_wcss_apss_smd_low */
-			<0xff 176>, /* o_wcss_apss_smsm_irq */
-			<0xff 177>, /* o_wcss_apss_wlan_data_xfer_done */
-			<0xff 178>, /* o_wcss_apss_wlan_rx_data_avail */
-			<0xff 179>, /* o_wcss_apss_asic_intr */
-
-			<0xff 188>, /* lpass_irq_out_apcs(0) */
-			<0xff 189>, /* lpass_irq_out_apcs(1) */
-			<0xff 190>, /* lpass_irq_out_apcs(2) */
-			<0xff 191>, /* lpass_irq_out_apcs(3) */
-			<0xff 192>, /* lpass_irq_out_apcs(4) */
-			<0xff 193>, /* lpass_irq_out_apcs(5) */
-			<0xff 194>, /* lpass_irq_out_apcs(6) */
-			<0xff 195>, /* lpass_irq_out_apcs(7) */
-			<0xff 196>, /* lpass_irq_out_apcs(8) */
-			<0xff 197>, /* lpass_irq_out_apcs(9) */
-			<0xff 200>, /* rpm_ipc(4) */
-			<0xff 201>, /* rpm_ipc(5) */
-			<0xff 202>, /* rpm_ipc(6) */
-			<0xff 203>, /* rpm_ipc(7) */
-			<0xff 204>, /* rpm_ipc(24) */
-			<0xff 205>, /* rpm_ipc(25) */
-			<0xff 206>, /* rpm_ipc(26) */
-			<0xff 207>, /* rpm_ipc(27) */
-			<0xff 240>; /* summary_irq_kpss */
-
-		qcom,gpio-parent = <&msmgpio>;
-		qcom,gpio-map = <3  102>,
-			<4  1 >,
-			<5  5 >,
-			<6  9 >,
-			<7  18>,
-			<8  20>,
-			<9  24>,
-			<10  27>,
-			<11  28>,
-			<12  34>,
-			<13  35>,
-			<14  37>,
-			<15  42>,
-			<16  44>,
-			<17  46>,
-			<18  50>,
-			<19  54>,
-			<20  59>,
-			<21  61>,
-			<22  62>,
-			<23  64>,
-			<24  65>,
-			<25  66>,
-			<26  67>,
-			<27  68>,
-			<28  71>,
-			<29  72>,
-			<30  73>,
-			<31  74>,
-			<32  75>,
-			<33  77>,
-			<34  79>,
-			<35  80>,
-			<36  82>,
-			<37  86>,
-			<38  92>,
-			<39  93>,
-			<40  95>;
-	};
-
-	qcom,pm-8x60@fe805664 {
-		compatible = "qcom,pm-8x60";
-		reg = <0xfe805664 0x40>;
-		qcom,pc-mode = <0>; /*MSM_PC_TZ_L2_INT */
-		qcom,use-sync-timer;
-		qcom,saw-turns-off-pll;
-	};
-
-	qcom,rpm-log@fc19dc00 {
-		compatible = "qcom,rpm-log";
-		reg = <0xfc19dc00 0x4000>;
-		qcom,rpm-addr-phys = <0xfc000000>;
-		qcom,offset-version = <4>;
-		qcom,offset-page-buffer-addr = <36>;
-		qcom,offset-log-len = <40>;
-		qcom,offset-log-len-mask = <44>;
-		qcom,offset-page-indices = <56>;
-	};
-
-	qcom,rpm-stats@0xfc19dbd0{
-		compatible = "qcom,rpm-stats";
-		reg = <0xfc19dbd0 0x1000>;
-		reg-names = "phys_addr_base";
-		qcom,sleep-stats-version = <2>;
-	};
-};
diff --git a/arch/arm/boot/dts/msm8974-v2.dtsi b/arch/arm/boot/dts/msm8974-v2.dtsi
index a245d8a..3dda20f 100644
--- a/arch/arm/boot/dts/msm8974-v2.dtsi
+++ b/arch/arm/boot/dts/msm8974-v2.dtsi
@@ -18,7 +18,7 @@
 
 /include/ "msm8974.dtsi"
 /include/ "msm8974-v2-iommu.dtsi"
-/include/ "msm8974-v2-pm.dtsi"
+/include/ "msm8974-v2-iommu-domains.dtsi"
 
 / {
 	android_usb@fe8050c8 {
@@ -64,3 +64,60 @@
 	qcom,mdss-intf-off = <0x00012500 0x00012700
 			      0x00012900 0x00012b00>;
 };
+
+&msm_vidc {
+	qcom,vidc-ns-map = <0x40000000 0x40000000>;
+	qcom,load-freq-tbl = <979200 465000000>,
+		<783360 465000000>,
+		<489600 266670000>,
+		<244800 133330000>;
+	qcom,reg-presets = <0x80070 0x11FFF>,
+		<0x80074 0xA4>,
+		<0x800A8 0x1FFF>,
+		<0x80124 0x3>,
+		<0xE0020 0x5555556>,
+		<0xE0024 0x0>;
+	qcom,bus-ports = <1>;
+	qcom,enc-ocmem-ab-ib = <0 0>,
+		<138000 1034000>,
+		<414000 1034000>,
+		<940000 1034000>,
+		<1880000 2068000>,
+		<3008000 3309000>,
+		<3760000 4136000>,
+		<4468000 2457000>;
+	qcom,dec-ocmem-ab-ib = <0 0>,
+		<176000 519000>,
+		<456000 519000>,
+		<864000 519000>,
+		<1728000 1038000>,
+		<2766000 1661000>,
+		<3456000 2076000>,
+		<3662000 2198000>;
+	qcom,enc-ddr-ab-ib = <0 0>,
+		<60000 302000>,
+		<182000 302000>,
+		<402000 302000>,
+		<804000 604000>,
+		<1288000 967000>,
+		<2340000 1404000>,
+		<24940000 1496000>;
+	qcom,dec-ddr-ab-ib = <0 0>,
+		<104000 303000>,
+		<268000 303000>,
+		<506000 303000>,
+		<1012000 606000>,
+		<1620000 970000>,
+		<2024000 1212000>,
+		<2132000 1279000>;
+	qcom,iommu-groups = <&venus_domain_ns &venus_domain_sec_bitstream
+			&venus_domain_sec_pixel &venus_domain_sec_non_pixel>;
+	qcom,iommu-group-buffer-types = <0xfff 0x91 0x42 0x120>;
+	qcom,buffer-type-tz-usage-table = <0x91 0x1>,
+					<0x42 0x2>,
+					<0x120 0x3>;
+};
+
+&krait_pdn {
+	qcom,use-phase-switching;
+};
diff --git a/arch/arm/boot/dts/msm8974.dtsi b/arch/arm/boot/dts/msm8974.dtsi
index 3b5b062..7c6a9d1 100644
--- a/arch/arm/boot/dts/msm8974.dtsi
+++ b/arch/arm/boot/dts/msm8974.dtsi
@@ -11,6 +11,7 @@
  */
 
 /include/ "skeleton.dtsi"
+/include/ "msm8974-pm.dtsi"
 /include/ "msm8974-camera.dtsi"
 /include/ "msm8974-coresight.dtsi"
 /include/ "msm-gdsc.dtsi"
@@ -30,6 +31,15 @@
 		spi7 = &spi_7;
 	};
 
+	memory {
+
+		secure_mem: region@0 {
+			linux,contiguous-region;
+			reg = <0 0x7800000>;
+			label = "secure_mem";
+		};
+	};
+
 	intc: interrupt-controller@F9000000 {
 		compatible = "qcom,msm-qgic2";
 		interrupt-controller;
@@ -70,65 +80,11 @@
 		reg = <0xfc4a3000 0x1000>;
 	};
 
-	qcom,vidc@fdc00000 {
+	msm_vidc: qcom,vidc@fdc00000 {
 		compatible = "qcom,msm-vidc";
 		reg = <0xfdc00000 0xff000>;
 		interrupts = <0 44 0>;
-		qcom,vidc-cp-map = <0x1000000 0x3f000000>;
-		qcom,vidc-ns-map = <0x40000000 0x40000000>;
-		qcom,load-freq-tbl = <979200 410000000>,
-			<783360 410000000>,
-			<489600 266670000>,
-			<244800 133330000>;
 		qcom,hfi = "venus";
-		qcom,reg-presets = <0x80004 0x1>,
-			<0x80178 0x00001FFF>,
-			<0x8017c 0x1FFF1FFF>,
-			<0x800b0 0x10101001>,
-			<0x800b4 0x10101010>,
-			<0x800b8 0x10101010>,
-			<0x800bc 0x00000010>,
-			<0x800c0 0x1010100f>,
-			<0x800c4 0x10101010>,
-			<0x800c8 0x10101010>,
-			<0x800cc 0x00000010>,
-			<0x800d0 0x00001010>,
-			<0x800d4 0x00001010>,
-			<0x800f0 0x00000030>,
-			<0x800d8 0x00000707>,
-			<0x800dc 0x00000707>,
-			<0x80124 0x00000001>,
-			<0xE0020 0x5555556>,
-			<0xE0024 0x0>;
-		qcom,bus-ports = <1>;
-		qcom,enc-ocmem-ab-ib = <0 0>,
-			<138200 1222000>,
-			<414700 1222000>,
-			<940000 2444000>,
-			<1880000 2444000>,
-			<3008000 3910400>,
-			<3760000 4888000>;
-		qcom,dec-ocmem-ab-ib = <0 0>,
-			<176900 1556640>,
-			<456200 1556640>,
-			<864800 1556640>,
-			<1729600 3113280>,
-			<2767360 4981248>,
-			<3459200 6226560>;
-		qcom,enc-ddr-ab-ib = <0 0>,
-			<60000 664950>,
-			<181000 664950>,
-			<403000 664950>,
-			<806000 1329900>,
-			<1289600 2127840>,
-			<161200 6400000>;
-		qcom,dec-ddr-ab-ib = <0 0>,
-			<110000 909000>,
-			<268000 909000>,
-			<505000 909000>,
-			<1010000 1818000>,
-			<1616000 2908800>,
-			<2020000 6400000>;
 	};
 
 	qcom,wfd {
@@ -912,6 +868,10 @@
 		qcom,firmware-name = "wcnss";
 	};
 
+	qcom,iris-fm {
+		compatible = "qcom,iris_fm";
+	};
+
 	qcom,wcnss-wlan@fb000000 {
 		compatible = "qcom,wcnss_wlan";
 		reg = <0xfb000000 0x280000>;
@@ -1063,6 +1023,14 @@
 		reg-names = "crypto-base","crypto-bam-base";
 		interrupts = <0 236 0>;
 		qcom,bam-pipe-pair = <1>;
+		qcom,ce-hw-instance = <1>;
+		qcom,msm-bus,name = "qcedev-noc";
+		qcom,msm-bus,num-cases = <2>;
+		qcom,msm-bus,active-only = <0>;
+		qcom,msm-bus,num-paths = <1>;
+		qcom,msm-bus,vectors-KBps =
+				<56 512 0 0>,
+				<56 512 3936000 393600>;
 	};
 
         qcom,qcrypto@fd444000 {
@@ -1072,6 +1040,14 @@
 		reg-names = "crypto-base","crypto-bam-base";
 		interrupts = <0 236 0>;
 		qcom,bam-pipe-pair = <2>;
+		qcom,ce-hw-instance = <1>;
+		qcom,msm-bus,name = "qcrypto-noc";
+		qcom,msm-bus,num-cases = <2>;
+		qcom,msm-bus,active-only = <0>;
+		qcom,msm-bus,num-paths = <1>;
+		qcom,msm-bus,vectors-KBps =
+				<56 512 0 0>,
+				<56 512 3936000 393600>;
 	};
 
 	qcom,usbbam@f9304000 {
@@ -1172,11 +1148,10 @@
         };
 
 	uart7: uart@f995d000 { /*BLSP #2, UART #7 */
-		cell-index = <0>;
 		compatible = "qcom,msm-hsuart-v14";
 		status = "disabled";
 		reg = <0xf995d000 0x1000>,
-			<0xf9944000 0x5000>;
+			<0xf9944000 0x19000>;
 		reg-names = "core_mem", "bam_mem";
 		interrupts = <0 113 0>, <0 239 0>;
 		interrupt-names = "core_irq", "bam_irq";
@@ -1263,6 +1238,23 @@
 	qcom,bcl {
 		compatible = "qcom,bcl";
 	};
+
+	qcom,ssm {
+		compatible = "qcom,ssm";
+		qcom,channel-name = "SSM_RTR";
+	};
+
+	sfpb_spinlock: qcom,ipc-spinlock@fd484000 {
+		compatible = "qcom,ipc-spinlock-sfpb";
+		reg = <0xfd484000 0x1000>;
+		qcom,num-locks = <32>;
+	};
+
+	ldrex_spinlock: qcom,ipc-spinlock@fa00000 {
+		compatible = "qcom,ipc-spinlock-ldrex";
+		reg = <0xfa00000 0x200000>;
+		status = "disable";
+	};
 };
 
 &gdsc_venus {
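
With the secure_mem contiguous region declared under the memory node above, a client selects it by phandle instead of carving out a static EBI1 reservation; the msm8974-ion.dtsi hunk earlier in this patch does exactly that for the secure ION heap. A minimal sketch of the pattern, where the node name is illustrative and the properties are the ones shown in that hunk:

	qcom,ion-heap@8 {
		compatible = "qcom,msm-ion-reserve";
		reg = <8>;				/* heap id from the ion hunk */
		qcom,heap-align = <0x1000>;
		linux,contiguous-region = <&secure_mem>;	/* CMA region defined above */
	};
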
diff --git a/arch/arm/boot/dts/msm9625-coresight.dtsi b/arch/arm/boot/dts/msm9625-coresight.dtsi
index 7a5aa5c..0af8fa5 100644
--- a/arch/arm/boot/dts/msm9625-coresight.dtsi
+++ b/arch/arm/boot/dts/msm9625-coresight.dtsi
@@ -15,18 +15,21 @@
 		compatible = "arm,coresight-tmc";
 		reg = <0xfc322000 0x1000>,
 		      <0xfc37c000 0x3000>;
+		reg-names = "tmc-etr-base", "tmc-etr-bam-base";
 
 		qcom,memory-reservation-type = "EBI1";
-		qcom,memory-reservation-size = <0x100000>; /* 1M EBI1 buffer */
+		qcom,memory-reservation-size = <0x20000>; /* 128K EBI1 buffer */
 
 		coresight-id = <0>;
 		coresight-name = "coresight-tmc-etr";
 		coresight-nr-inports = <1>;
+		coresight-ctis = <&cti0 &cti8>;
 	};
 
 	tpiu: tpiu@fc318000 {
 		compatible = "arm,coresight-tpiu";
 		reg = <0xfc318000 0x1000>;
+		reg-names = "tpiu-base";
 
 		coresight-id = <1>;
 		coresight-name = "coresight-tpiu";
@@ -36,6 +39,7 @@
 	replicator: replicator@fc31c000 {
 		compatible = "qcom,coresight-replicator";
 		reg = <0xfc31c000 0x1000>;
+		reg-names = "replicator-base";
 
 		coresight-id = <2>;
 		coresight-name = "coresight-replicator";
@@ -48,6 +52,7 @@
 	tmc_etf: tmc@fc307000 {
 		compatible = "arm,coresight-tmc";
 		reg = <0xfc307000 0x1000>;
+		reg-names = "tmc-etf-base";
 
 		coresight-id = <3>;
 		coresight-name = "coresight-tmc-etf";
@@ -56,11 +61,13 @@
 		coresight-child-list = <&replicator>;
 		coresight-child-ports = <0>;
 		coresight-default-sink;
+		coresight-ctis = <&cti0 &cti8>;
 	};
 
 	funnel_merg: funnel@fc31b000 {
 		compatible = "arm,coresight-funnel";
 		reg = <0xfc31b000 0x1000>;
+		reg-names = "funnel-merg-base";
 
 		coresight-id = <4>;
 		coresight-name = "coresight-funnel-merg";
@@ -73,6 +80,7 @@
 	funnel_in0: funnel@fc319000 {
 		compatible = "arm,coresight-funnel";
 		reg = <0xfc319000 0x1000>;
+		reg-names = "funnel-in0-base";
 
 		coresight-id = <5>;
 		coresight-name = "coresight-funnel-in0";
@@ -85,6 +93,7 @@
 	funnel_in1: funnel@fc31a000 {
 		compatible = "arm,coresight-funnel";
 		reg = <0xfc31a000 0x1000>;
+		reg-names = "funnel-in1-base";
 
 		coresight-id = <6>;
 		coresight-name = "coresight-funnel-in1";
@@ -98,6 +107,7 @@
 		compatible = "arm,coresight-stm";
 		reg = <0xfc321000 0x1000>,
 		      <0xfa280000 0x180000>;
+		reg-names = "stm-base", "stm-data-base";
 
 		coresight-id = <7>;
 		coresight-name = "coresight-stm";
@@ -110,6 +120,7 @@
 	etm: etm@fc332000 {
 		compatible = "arm,coresight-etm";
 		reg = <0xfc332000 0x1000>;
+		reg-names = "etm-base";
 
 		coresight-id = <8>;
 		coresight-name = "coresight-etm";
@@ -124,6 +135,7 @@
 	csr: csr@fc302000 {
 		compatible = "qcom,coresight-csr";
 		reg = <0xfc302000 0x1000>;
+		reg-names = "csr-base";
 
 		coresight-id = <9>;
 		coresight-name = "coresight-csr";
@@ -131,4 +143,104 @@
 
 		qcom,blk-size = <1>;
 	};
+
+	cti0: cti@fc308000 {
+		compatible = "arm,coresight-cti";
+		reg = <0xfc308000 0x1000>;
+		reg-names = "cti0-base";
+
+		coresight-id = <10>;
+		coresight-name = "coresight-cti0";
+		coresight-nr-inports = <0>;
+	};
+
+	cti1: cti@fc309000 {
+		compatible = "arm,coresight-cti";
+		reg = <0xfc309000 0x1000>;
+		reg-names = "cti1-base";
+
+		coresight-id = <11>;
+		coresight-name = "coresight-cti1";
+		coresight-nr-inports = <0>;
+	};
+
+	cti2: cti@fc30a000 {
+		compatible = "arm,coresight-cti";
+		reg = <0xfc30a000 0x1000>;
+		reg-names = "cti2-base";
+
+		coresight-id = <12>;
+		coresight-name = "coresight-cti2";
+		coresight-nr-inports = <0>;
+	};
+
+	cti3: cti@fc30b000 {
+		compatible = "arm,coresight-cti";
+		reg = <0xfc30b000 0x1000>;
+		reg-names = "cti3-base";
+
+		coresight-id = <13>;
+		coresight-name = "coresight-cti3";
+		coresight-nr-inports = <0>;
+	};
+
+	cti4: cti@fc30c000 {
+		compatible = "arm,coresight-cti";
+		reg = <0xfc30c000 0x1000>;
+		reg-names = "cti4-base";
+
+		coresight-id = <14>;
+		coresight-name = "coresight-cti4";
+		coresight-nr-inports = <0>;
+	};
+
+	cti5: cti@fc30d000 {
+		compatible = "arm,coresight-cti";
+		reg = <0xfc30d000 0x1000>;
+		reg-names = "cti5-base";
+
+		coresight-id = <15>;
+		coresight-name = "coresight-cti5";
+		coresight-nr-inports = <0>;
+	};
+
+	cti6: cti@fc30e000 {
+		compatible = "arm,coresight-cti";
+		reg = <0xfc30e000 0x1000>;
+		reg-names = "cti6-base";
+
+		coresight-id = <16>;
+		coresight-name = "coresight-cti6";
+		coresight-nr-inports = <0>;
+	};
+
+	cti7: cti@fc30f000 {
+		compatible = "arm,coresight-cti";
+		reg = <0xfc30f000 0x1000>;
+		reg-names = "cti7-base";
+
+		coresight-id = <17>;
+		coresight-name = "coresight-cti7";
+		coresight-nr-inports = <0>;
+	};
+
+	cti8: cti@fc310000 {
+		compatible = "arm,coresight-cti";
+		reg = <0xfc310000 0x1000>;
+		reg-names = "cti8-base";
+
+		coresight-id = <18>;
+		coresight-name = "coresight-cti8";
+		coresight-nr-inports = <0>;
+	};
+
+	cti_cpu: cti@fc333000 {
+		compatible = "arm,coresight-cti";
+		reg = <0xfc333000 0x1000>;
+		reg-names = "cti-cpu-base";
+
+		coresight-id = <19>;
+		coresight-name = "coresight-cti-cpu";
+		coresight-nr-inports = <0>;
+	};
 };
diff --git a/arch/arm/boot/dts/msm9625-v1.dtsi b/arch/arm/boot/dts/msm9625-v1.dtsi
index 3e88158..54fe443 100644
--- a/arch/arm/boot/dts/msm9625-v1.dtsi
+++ b/arch/arm/boot/dts/msm9625-v1.dtsi
@@ -29,8 +29,23 @@
 		reg = <0xfc42a8c8 0xc8>;
 		qcom,android-usb-swfi-latency = <100>;
 	};
+
+	qcom,bam_dmux@fc834000 {
+		compatible = "qcom,bam_dmux";
+		reg = <0xfc834000 0x7000>;
+		interrupts = <0 29 1>;
+	};
 };
 
 &ipa_hw {
 	qcom,ipa-hw-ver = <1>; /* IPA h-w revision */
 };
+
+/* CoreSight */
+&tmc_etr {
+	qcom,reset-flush-race;
+};
+
+&stm {
+	qcom,write-64bit;
+};
diff --git a/arch/arm/boot/dts/msm9625.dtsi b/arch/arm/boot/dts/msm9625.dtsi
index 922616c..8517605 100644
--- a/arch/arm/boot/dts/msm9625.dtsi
+++ b/arch/arm/boot/dts/msm9625.dtsi
@@ -317,20 +317,16 @@
 		qcom,bus-speed-mode = "SDR12", "SDR25", "SDR50", "DDR50";
 	};
 
-	qcom,bam_dmux@fc834000 {
-		compatible = "qcom,bam_dmux";
-		reg = <0xfc834000 0x7000>;
-		interrupts = <0 29 1>;
-	};
-
 	ipa_hw: qcom,ipa@fd4c0000 {
 		compatible = "qcom,ipa";
 		reg = <0xfd4c0000 0x26000>,
-		      <0xfd4c4000 0x14818>;
-		reg-names = "ipa-base", "bam-base";
+		      <0xfd4c4000 0x14818>,
+		      <0xfc834000 0x7000>;
+		reg-names = "ipa-base", "bam-base", "a2-bam-base";
 		interrupts = <0 252 0>,
-		             <0 253 0>;
-		interrupt-names = "ipa-irq", "bam-irq";
+		             <0 253 0>,
+		             <0 29 1>;
+		interrupt-names = "ipa-irq", "bam-irq", "a2-bam-irq";
 
 		qcom,pipe1 {
 			label = "a2-to-ipa";
@@ -519,6 +515,7 @@
 
 	qcom,msm-pcm {
 		compatible = "qcom,msm-pcm-dsp";
+		qcom,msm-pcm-dsp-id = <0>;
 	};
 
 	qcom,msm-pcm-routing {
@@ -587,6 +584,28 @@
 		compatible = "qcom,msm-stub-codec";
 	};
 
+	qcom,msm-auxpcm {
+		compatible = "qcom,msm-auxpcm-resource";
+		qcom,msm-cpudai-auxpcm-clk = "pcm_clk";
+		qcom,msm-cpudai-auxpcm-mode = <0>, <0>;
+		qcom,msm-cpudai-auxpcm-sync = <1>, <1>;
+		qcom,msm-cpudai-auxpcm-frame = <5>, <4>;
+		qcom,msm-cpudai-auxpcm-quant = <2>, <2>;
+		qcom,msm-cpudai-auxpcm-slot = <1>, <1>;
+		qcom,msm-cpudai-auxpcm-data = <0>, <0>;
+		qcom,msm-cpudai-auxpcm-pcm-clk-rate = <2048000>, <2048000>;
+
+		qcom,msm-auxpcm-rx {
+			qcom,msm-auxpcm-dev-id = <4106>;
+			compatible = "qcom,msm-auxpcm-dev";
+		};
+
+		qcom,msm-auxpcm-tx {
+			qcom,msm-auxpcm-dev-id = <4107>;
+			compatible = "qcom,msm-auxpcm-dev";
+		};
+	};
+
 	qcom,msm-dai-mi2s {
 		compatible = "qcom,msm-dai-mi2s";
 		qcom,msm-dai-q6-mi2s-prim {
@@ -713,7 +732,7 @@
 		qcom,pre-div-channel-scaling = <0>;
 		qcom,calibration-type = "ratiometric";
 		qcom,scale-function = <0>;
-		qcom,hw-settle-time = <0>;
+		qcom,hw-settle-time = <2>;
 		qcom,fast-avg-setup = <0>;
 	};
 
@@ -724,7 +743,7 @@
 		qcom,pre-div-channel-scaling = <0>;
 		qcom,calibration-type = "ratiometric";
 		qcom,scale-function = <2>;
-		qcom,hw-settle-time = <0>;
+		qcom,hw-settle-time = <2>;
 		qcom,fast-avg-setup = <0>;
 	};
 
@@ -735,7 +754,7 @@
 		qcom,pre-div-channel-scaling = <0>;
 		qcom,calibration-type = "ratiometric";
 		qcom,scale-function = <2>;
-		qcom,hw-settle-time = <0>;
+		qcom,hw-settle-time = <2>;
 		qcom,fast-avg-setup = <0>;
 	};
 
@@ -746,7 +765,7 @@
 		qcom,pre-div-channel-scaling = <0>;
 		qcom,calibration-type = "ratiometric";
 		qcom,scale-function = <4>;
-		qcom,hw-settle-time = <0>;
+		qcom,hw-settle-time = <2>;
 		qcom,fast-avg-setup = <0>;
 	};
 
@@ -757,7 +776,7 @@
 		qcom,pre-div-channel-scaling = <0>;
 		qcom,calibration-type = "ratiometric";
 		qcom,scale-function = <4>;
-		qcom,hw-settle-time = <0>;
+		qcom,hw-settle-time = <2>;
 		qcom,fast-avg-setup = <0>;
 	};
 };
diff --git a/arch/arm/boot/dts/skeleton.dtsi b/arch/arm/boot/dts/skeleton.dtsi
index b41d241..f9988cd 100644
--- a/arch/arm/boot/dts/skeleton.dtsi
+++ b/arch/arm/boot/dts/skeleton.dtsi
@@ -9,5 +9,10 @@
 	#size-cells = <1>;
 	chosen { };
 	aliases { };
-	memory { device_type = "memory"; reg = <0 0>; };
+	memory {
+		#address-cells = <1>;
+		#size-cells = <1>;
+		device_type = "memory";
+		reg = <0 0>;
+	};
 };
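
The #address-cells and #size-cells added to the memory node are what let child region nodes, such as the secure_mem region this patch adds in msm8974.dtsi, carry a one-cell base and one-cell size in their reg property. A board declaring its own contiguous region keeps the same shape; the region below is hypothetical, only the property names come from this patch:

	memory {
		#address-cells = <1>;
		#size-cells = <1>;
		device_type = "memory";

		example_mem: region@0 {
			linux,contiguous-region;
			reg = <0 0x800000>;	/* illustrative 8 MB region */
			label = "example_mem";
		};
	};
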
diff --git a/arch/arm/configs/msm8610_defconfig b/arch/arm/configs/msm8610_defconfig
index 90ac2166..e6f4396 100644
--- a/arch/arm/configs/msm8610_defconfig
+++ b/arch/arm/configs/msm8610_defconfig
@@ -45,6 +45,7 @@
 CONFIG_MSM_SMD=y
 CONFIG_MSM_SMD_PKG4=y
 CONFIG_MSM_IPC_LOGGING=y
+CONFIG_MSM_BAM_DMUX=y
 CONFIG_MSM_SMP2P=y
 CONFIG_MSM_SMP2P_TEST=y
 CONFIG_MSM_IPC_ROUTER=y
@@ -61,6 +62,7 @@
 CONFIG_WCNSS_CORE_PRONTO=y
 CONFIG_MSM_DIRECT_SCLK_ACCESS=y
 CONFIG_MSM_WATCHDOG_V2=y
+CONFIG_MSM_DLOAD_MODE=y
 CONFIG_MSM_ADSP_LOADER=m
 CONFIG_NO_HZ=y
 CONFIG_HIGH_RES_TIMERS=y
@@ -77,6 +79,7 @@
 CONFIG_VFP=y
 CONFIG_NEON=y
 # CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
+CONFIG_WAKELOCK=y
 CONFIG_PM_RUNTIME=y
 CONFIG_NET=y
 CONFIG_PACKET=y
@@ -106,6 +109,10 @@
 CONFIG_BRIDGE_NF_EBTABLES=y
 CONFIG_BRIDGE_EBT_BROUTE=y
 CONFIG_BRIDGE=y
+CONFIG_NET_SCHED=y
+CONFIG_NET_SCH_HTB=y
+CONFIG_NET_SCH_PRIO=y
+CONFIG_NET_CLS_FW=y
 CONFIG_BLK_DEV_LOOP=y
 CONFIG_BLK_DEV_RAM=y
 CONFIG_MD=y
@@ -113,6 +120,8 @@
 CONFIG_DM_CRYPT=y
 CONFIG_NETDEVICES=y
 CONFIG_DUMMY=y
+# CONFIG_MSM_RMNET is not set
+CONFIG_MSM_RMNET_BAM=y
 CONFIG_WCNSS_CORE=y
 CONFIG_WCNSS_CORE_PRONTO=y
 CONFIG_WCNSS_MEM_PRE_ALLOC=y
@@ -176,9 +185,13 @@
 CONFIG_ANDROID_TIMED_GPIO=y
 CONFIG_ANDROID_LOW_MEMORY_KILLER=y
 CONFIG_MSM_IOMMU=y
+CONFIG_MSM_IOMMU_PMON=y
 CONFIG_SPS=y
 CONFIG_SPS_SUPPORT_NDP_BAM=y
 CONFIG_MMC_MSM_SPS_SUPPORT=y
+CONFIG_RTC_CLASS=y
+# CONFIG_RTC_DRV_MSM is not set
+CONFIG_RTC_DRV_QPNP=y
 CONFIG_EXT2_FS=y
 CONFIG_EXT2_FS_XATTR=y
 CONFIG_EXT3_FS=y
@@ -207,6 +220,7 @@
 CONFIG_CRYPTO_TWOFISH=y
 # CONFIG_CRYPTO_HW is not set
 CONFIG_CRC_CCITT=y
+CONFIG_QPNP_POWER_ON=y
 CONFIG_LIBCRC32C=y
 CONFIG_MEDIA_SUPPORT=y
 CONFIG_MEDIA_CAMERA_SUPPORT=y
@@ -222,3 +236,4 @@
 CONFIG_MSM_OCMEM_NONSECURE=y
 CONFIG_THERMAL=y
 CONFIG_THERMAL_TSENS8974=y
+CONFIG_THERMAL_MONITOR=y
diff --git a/arch/arm/configs/msm8960-perf_defconfig b/arch/arm/configs/msm8960-perf_defconfig
index 053e1ca..7362ea0 100644
--- a/arch/arm/configs/msm8960-perf_defconfig
+++ b/arch/arm/configs/msm8960-perf_defconfig
@@ -134,6 +134,8 @@
 CONFIG_IP_PNP_DHCP=y
 CONFIG_INET_AH=y
 CONFIG_INET_ESP=y
+CONFIG_INET_XFRM_MODE_TRANSPORT=y
+CONFIG_INET_XFRM_MODE_TUNNEL=y
 # CONFIG_INET_LRO is not set
 CONFIG_IPV6=y
 CONFIG_IPV6_PRIVACY=y
@@ -522,6 +524,7 @@
 CONFIG_CRYPTO_TEST=m
 CONFIG_CRYPTO_CTR=y
 CONFIG_CRYPTO_CTS=y
+CONFIG_CRYPTO_XCBC=y
 CONFIG_CRYPTO_TWOFISH=y
 CONFIG_CRYPTO_DEV_QCRYPTO=m
 CONFIG_CRYPTO_DEV_QCE=m
diff --git a/arch/arm/configs/msm8960_defconfig b/arch/arm/configs/msm8960_defconfig
index 7b54eb4..bb34075 100644
--- a/arch/arm/configs/msm8960_defconfig
+++ b/arch/arm/configs/msm8960_defconfig
@@ -139,6 +139,8 @@
 CONFIG_IP_PNP_DHCP=y
 CONFIG_INET_AH=y
 CONFIG_INET_ESP=y
+CONFIG_INET_XFRM_MODE_TRANSPORT=y
+CONFIG_INET_XFRM_MODE_TUNNEL=y
 # CONFIG_INET_LRO is not set
 CONFIG_IPV6=y
 CONFIG_IPV6_PRIVACY=y
@@ -539,6 +541,7 @@
 CONFIG_CRYPTO_TEST=m
 CONFIG_CRYPTO_CTR=y
 CONFIG_CRYPTO_CTS=y
+CONFIG_CRYPTO_XCBC=y
 CONFIG_CRYPTO_TWOFISH=y
 CONFIG_CRYPTO_DEV_QCRYPTO=m
 CONFIG_CRYPTO_DEV_QCE=m
diff --git a/arch/arm/configs/msm8974-perf_defconfig b/arch/arm/configs/msm8974-perf_defconfig
index 6d2b3c6..d36d5a2 100644
--- a/arch/arm/configs/msm8974-perf_defconfig
+++ b/arch/arm/configs/msm8974-perf_defconfig
@@ -108,14 +108,17 @@
 CONFIG_NET=y
 CONFIG_PACKET=y
 CONFIG_UNIX=y
+CONFIG_XFRM_USER=y
 CONFIG_INET=y
 CONFIG_IP_ADVANCED_ROUTER=y
 CONFIG_IP_MULTIPLE_TABLES=y
 CONFIG_IP_ROUTE_VERBOSE=y
 CONFIG_IP_PNP=y
 CONFIG_IP_PNP_DHCP=y
-# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
-# CONFIG_INET_XFRM_MODE_TUNNEL is not set
+CONFIG_INET_AH=y
+CONFIG_INET_ESP=y
+CONFIG_INET_XFRM_MODE_TRANSPORT=y
+CONFIG_INET_XFRM_MODE_TUNNEL=y
 # CONFIG_INET_XFRM_MODE_BEET is not set
 # CONFIG_INET_LRO is not set
 CONFIG_IPV6=y
@@ -231,6 +234,7 @@
 CONFIG_GENLOCK_MISCDEVICE=y
 CONFIG_SYNC=y
 CONFIG_SW_SYNC=y
+CONFIG_CMA=y
 CONFIG_BLK_DEV_LOOP=y
 CONFIG_BLK_DEV_RAM=y
 CONFIG_HAPTIC_ISA1200=y
@@ -448,8 +452,10 @@
 CONFIG_DEBUG_USER=y
 CONFIG_PID_IN_CONTEXTIDR=y
 CONFIG_KEYS=y
+CONFIG_CRYPTO_NULL=y
 CONFIG_CRYPTO_MD4=y
 CONFIG_CRYPTO_ARC4=y
+CONFIG_CRYPTO_XCBC=y
 CONFIG_CRYPTO_TWOFISH=y
 CONFIG_CRYPTO_DEV_QCRYPTO=m
 CONFIG_CRYPTO_DEV_QCE=m
diff --git a/arch/arm/configs/msm8974_defconfig b/arch/arm/configs/msm8974_defconfig
index 64e13f5..df0b5f0 100644
--- a/arch/arm/configs/msm8974_defconfig
+++ b/arch/arm/configs/msm8974_defconfig
@@ -112,14 +112,17 @@
 CONFIG_NET=y
 CONFIG_PACKET=y
 CONFIG_UNIX=y
+CONFIG_XFRM_USER=y
 CONFIG_INET=y
 CONFIG_IP_ADVANCED_ROUTER=y
 CONFIG_IP_MULTIPLE_TABLES=y
 CONFIG_IP_ROUTE_VERBOSE=y
 CONFIG_IP_PNP=y
 CONFIG_IP_PNP_DHCP=y
-# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
-# CONFIG_INET_XFRM_MODE_TUNNEL is not set
+CONFIG_INET_AH=y
+CONFIG_INET_ESP=y
+CONFIG_INET_XFRM_MODE_TRANSPORT=y
+CONFIG_INET_XFRM_MODE_TUNNEL=y
 # CONFIG_INET_XFRM_MODE_BEET is not set
 # CONFIG_INET_LRO is not set
 CONFIG_IPV6=y
@@ -235,6 +238,7 @@
 CONFIG_GENLOCK_MISCDEVICE=y
 CONFIG_SYNC=y
 CONFIG_SW_SYNC=y
+CONFIG_CMA=y
 CONFIG_BLK_DEV_LOOP=y
 CONFIG_BLK_DEV_RAM=y
 CONFIG_TSPP=m
@@ -468,10 +472,14 @@
 CONFIG_CPU_FREQ_SWITCH_PROFILER=y
 CONFIG_DYNAMIC_DEBUG=y
 CONFIG_DEBUG_USER=y
+CONFIG_DEBUG_LL=y
+CONFIG_EARLY_PRINTK=y
 CONFIG_PID_IN_CONTEXTIDR=y
 CONFIG_KEYS=y
+CONFIG_CRYPTO_NULL=y
 CONFIG_CRYPTO_MD4=y
 CONFIG_CRYPTO_ARC4=y
+CONFIG_CRYPTO_XCBC=y
 CONFIG_CRYPTO_TWOFISH=y
 CONFIG_CRYPTO_DEV_QCRYPTO=m
 CONFIG_CRYPTO_DEV_QCE=m
diff --git a/arch/arm/configs/msm9625-perf_defconfig b/arch/arm/configs/msm9625-perf_defconfig
new file mode 100644
index 0000000..1fe528a
--- /dev/null
+++ b/arch/arm/configs/msm9625-perf_defconfig
@@ -0,0 +1,325 @@
+# CONFIG_ARM_PATCH_PHYS_VIRT is not set
+CONFIG_EXPERIMENTAL=y
+# CONFIG_LOCALVERSION_AUTO is not set
+CONFIG_SYSVIPC=y
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+CONFIG_CGROUPS=y
+CONFIG_CGROUP_DEBUG=y
+CONFIG_CGROUP_FREEZER=y
+CONFIG_CGROUP_CPUACCT=y
+CONFIG_RESOURCE_COUNTERS=y
+CONFIG_CGROUP_SCHED=y
+# CONFIG_FAIR_GROUP_SCHED is not set
+CONFIG_RT_GROUP_SCHED=y
+CONFIG_NAMESPACES=y
+# CONFIG_UTS_NS is not set
+# CONFIG_IPC_NS is not set
+# CONFIG_USER_NS is not set
+# CONFIG_PID_NS is not set
+CONFIG_RELAY=y
+CONFIG_BLK_DEV_INITRD=y
+CONFIG_RD_BZIP2=y
+CONFIG_RD_LZMA=y
+CONFIG_CC_OPTIMIZE_FOR_SIZE=y
+CONFIG_PANIC_TIMEOUT=5
+CONFIG_KALLSYMS_ALL=y
+CONFIG_EMBEDDED=y
+CONFIG_PROFILING=y
+CONFIG_OPROFILE=m
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+CONFIG_MODULE_FORCE_UNLOAD=y
+CONFIG_PARTITION_ADVANCED=y
+CONFIG_ARCH_MSM=y
+CONFIG_ARCH_MSM9625=y
+# CONFIG_MSM_STACKED_MEMORY is not set
+CONFIG_CPU_HAS_L2_PMU=y
+# CONFIG_MSM_FIQ_SUPPORT is not set
+# CONFIG_MSM_PROC_COMM is not set
+CONFIG_MSM_SMD=y
+CONFIG_MSM_SMD_PKG4=y
+CONFIG_MSM_BAM_DMUX=y
+CONFIG_MSM_SMP2P=y
+CONFIG_MSM_SMP2P_TEST=y
+CONFIG_MSM_IPC_LOGGING=y
+CONFIG_MSM_IPC_ROUTER=y
+CONFIG_MSM_IPC_ROUTER_SMD_XPRT=y
+CONFIG_MSM_RPM_REGULATOR_SMD=y
+CONFIG_MSM_SUBSYSTEM_RESTART=y
+CONFIG_MSM_PIL=y
+CONFIG_MSM_PIL_MSS_QDSP6V5=y
+CONFIG_MSM_DIRECT_SCLK_ACCESS=y
+CONFIG_MSM_BUS_SCALING=y
+CONFIG_MSM_WATCHDOG_V2=y
+CONFIG_MSM_DLOAD_MODE=y
+CONFIG_MSM_ADSP_LOADER=m
+CONFIG_NO_HZ=y
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_ARM_ARCH_TIMER=y
+CONFIG_PREEMPT=y
+CONFIG_AEABI=y
+CONFIG_HIGHMEM=y
+CONFIG_VMALLOC_RESERVE=0x19000000
+CONFIG_USE_OF=y
+CONFIG_CPU_FREQ=y
+CONFIG_CPU_FREQ_GOV_POWERSAVE=y
+CONFIG_CPU_FREQ_GOV_USERSPACE=y
+CONFIG_CPU_FREQ_GOV_ONDEMAND=y
+CONFIG_CPU_FREQ_GOV_CONSERVATIVE=y
+CONFIG_CPU_IDLE=y
+CONFIG_VFP=y
+CONFIG_NEON=y
+# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
+CONFIG_PM_RUNTIME=y
+CONFIG_NET=y
+CONFIG_PACKET=y
+CONFIG_UNIX=y
+CONFIG_INET=y
+CONFIG_IP_MULTICAST=y
+CONFIG_IP_ADVANCED_ROUTER=y
+CONFIG_IP_MULTIPLE_TABLES=y
+CONFIG_IP_MROUTE=y
+CONFIG_IP_PIMSM_V2=y
+CONFIG_IPV6=y
+CONFIG_IPV6_MULTIPLE_TABLES=y
+CONFIG_IPV6_SUBTREES=y
+CONFIG_NETFILTER=y
+CONFIG_NETFILTER_DEBUG=y
+CONFIG_NETFILTER_NETLINK_QUEUE=y
+CONFIG_NETFILTER_NETLINK_LOG=y
+CONFIG_NF_CONNTRACK=y
+CONFIG_NF_CONNTRACK_EVENTS=y
+CONFIG_NF_CONNTRACK_TIMESTAMP=y
+CONFIG_NF_CT_PROTO_UDPLITE=y
+CONFIG_NF_CONNTRACK_AMANDA=y
+CONFIG_NF_CONNTRACK_FTP=y
+CONFIG_NF_CONNTRACK_H323=y
+CONFIG_NF_CONNTRACK_IRC=y
+CONFIG_NF_CONNTRACK_NETBIOS_NS=y
+CONFIG_NF_CONNTRACK_SNMP=y
+CONFIG_NF_CONNTRACK_PPTP=y
+CONFIG_NF_CONNTRACK_SIP=y
+CONFIG_NF_CONNTRACK_TFTP=y
+CONFIG_NF_CT_NETLINK=y
+CONFIG_NETFILTER_XT_MARK=y
+CONFIG_NETFILTER_XT_CONNMARK=y
+CONFIG_NETFILTER_XT_MATCH_ADDRTYPE=y
+CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=y
+CONFIG_NETFILTER_XT_MATCH_CONNTRACK=y
+CONFIG_NETFILTER_XT_MATCH_DSCP=y
+CONFIG_NETFILTER_XT_MATCH_ESP=y
+CONFIG_NETFILTER_XT_MATCH_IPRANGE=y
+CONFIG_NETFILTER_XT_MATCH_MULTIPORT=y
+CONFIG_NETFILTER_XT_MATCH_PKTTYPE=y
+CONFIG_IP_SET=y
+CONFIG_NF_CONNTRACK_IPV4=y
+CONFIG_IP_NF_IPTABLES=y
+CONFIG_IP_NF_MATCH_AH=y
+CONFIG_IP_NF_MATCH_ECN=y
+CONFIG_IP_NF_MATCH_TTL=y
+CONFIG_IP_NF_FILTER=y
+CONFIG_IP_NF_TARGET_REJECT=y
+CONFIG_IP_NF_TARGET_REJECT_SKERR=y
+CONFIG_IP_NF_TARGET_ULOG=y
+CONFIG_NF_NAT=y
+CONFIG_IP_NF_TARGET_MASQUERADE=y
+CONFIG_IP_NF_TARGET_NATTYPE_MODULE=y
+CONFIG_IP_NF_TARGET_NETMAP=y
+CONFIG_IP_NF_TARGET_REDIRECT=y
+CONFIG_IP_NF_MANGLE=y
+CONFIG_IP_NF_TARGET_ECN=y
+CONFIG_IP_NF_TARGET_TTL=y
+CONFIG_IP_NF_RAW=y
+CONFIG_NF_CONNTRACK_IPV6=y
+CONFIG_IP6_NF_IPTABLES=y
+CONFIG_IP6_NF_MATCH_AH=y
+CONFIG_IP6_NF_MATCH_FRAG=y
+CONFIG_IP6_NF_MATCH_OPTS=y
+CONFIG_IP6_NF_MATCH_HL=y
+CONFIG_IP6_NF_MATCH_IPV6HEADER=y
+CONFIG_IP6_NF_MATCH_MH=y
+CONFIG_IP6_NF_MATCH_RT=y
+CONFIG_IP6_NF_FILTER=y
+CONFIG_IP6_NF_TARGET_REJECT=y
+CONFIG_IP6_NF_TARGET_REJECT_SKERR=y
+CONFIG_IP6_NF_MANGLE=y
+CONFIG_IP6_NF_RAW=y
+CONFIG_BRIDGE_NF_EBTABLES=y
+CONFIG_BRIDGE_EBT_BROUTE=y
+CONFIG_BRIDGE_EBT_T_FILTER=y
+CONFIG_BRIDGE=y
+CONFIG_NET_SCHED=y
+CONFIG_NET_SCH_HTB=y
+CONFIG_NET_SCH_PRIO=y
+CONFIG_NET_CLS_FW=y
+CONFIG_CFG80211=m
+CONFIG_NL80211_TESTMODE=y
+CONFIG_MTD=y
+CONFIG_MTD_CMDLINE_PARTS=y
+CONFIG_MTD_OF_PARTS=y
+CONFIG_MTD_BLOCK=y
+# CONFIG_MTD_MSM_NAND is not set
+CONFIG_MTD_MSM_QPIC_NAND=y
+CONFIG_BLK_DEV_LOOP=y
+CONFIG_BLK_DEV_RAM=y
+# CONFIG_ANDROID_PMEM is not set
+CONFIG_NETDEVICES=y
+# CONFIG_NET_VENDOR_BROADCOM is not set
+# CONFIG_NET_VENDOR_CIRRUS is not set
+# CONFIG_NET_VENDOR_FARADAY is not set
+# CONFIG_NET_VENDOR_INTEL is not set
+CONFIG_KS8851=y
+# CONFIG_NET_VENDOR_MICROCHIP is not set
+# CONFIG_MSM_RMNET is not set
+# CONFIG_MSM_RMNET_BAM is not set
+CONFIG_MSM_RMNET_WWAN=y
+# CONFIG_NET_VENDOR_NATSEMI is not set
+# CONFIG_NET_VENDOR_SEEQ is not set
+# CONFIG_NET_VENDOR_SMSC is not set
+# CONFIG_NET_VENDOR_STMICRO is not set
+CONFIG_ATH6K_LEGACY_EXT=y
+# CONFIG_INPUT_MOUSEDEV is not set
+CONFIG_INPUT_EVDEV=y
+# CONFIG_INPUT_KEYBOARD is not set
+# CONFIG_INPUT_MOUSE is not set
+CONFIG_INPUT_MISC=y
+CONFIG_INPUT_UINPUT=y
+CONFIG_INPUT_GPIO=m
+CONFIG_SERIO_LIBPS2=y
+# CONFIG_LEGACY_PTYS is not set
+CONFIG_SERIAL_MSM_HSL=y
+CONFIG_SERIAL_MSM_HS=y
+CONFIG_MSM_UARTDM_Core_v14=y
+CONFIG_DIAG_CHAR=y
+CONFIG_HW_RANDOM=y
+CONFIG_HW_RANDOM_MSM=y
+CONFIG_I2C=y
+CONFIG_I2C_CHARDEV=y
+CONFIG_I2C_QUP=y
+CONFIG_SPI=y
+CONFIG_SPI_QUP=y
+CONFIG_SPI_SPIDEV=m
+CONFIG_SPMI=y
+CONFIG_SPMI_MSM_PMIC_ARB=y
+CONFIG_MSM_QPNP_INT=y
+CONFIG_DEBUG_GPIO=y
+CONFIG_GPIO_SYSFS=y
+CONFIG_GPIO_QPNP_PIN=y
+CONFIG_GPIO_QPNP_PIN_DEBUG=y
+CONFIG_POWER_SUPPLY=y
+CONFIG_SMB137C_CHARGER=y
+CONFIG_SENSORS_QPNP_ADC_VOLTAGE=y
+CONFIG_THERMAL=y
+CONFIG_THERMAL_TSENS8974=y
+CONFIG_WCD9320_CODEC=y
+CONFIG_REGULATOR=y
+CONFIG_REGULATOR_FIXED_VOLTAGE=y
+CONFIG_REGULATOR_QPNP=y
+CONFIG_ION=y
+CONFIG_ION_MSM=y
+CONFIG_SOUND=y
+CONFIG_SND=y
+CONFIG_SND_SOC=y
+CONFIG_SND_SOC_MDM9625=y
+CONFIG_USB_GADGET=y
+CONFIG_USB_CI13XXX_MSM=y
+CONFIG_USB_G_ANDROID=y
+CONFIG_USB=y
+CONFIG_USB_EHCI_HCD=y
+CONFIG_USB_EHCI_MSM_HSIC=y
+CONFIG_MMC=y
+CONFIG_MMC_PERF_PROFILING=y
+CONFIG_MMC_UNSAFE_RESUME=y
+CONFIG_MMC_CLKGATE=y
+CONFIG_MMC_EMBEDDED_SDIO=y
+CONFIG_MMC_PARANOID_SD_INIT=y
+CONFIG_MMC_BLOCK_MINORS=32
+CONFIG_MMC_TEST=m
+CONFIG_MMC_MSM=y
+CONFIG_MMC_MSM_SPS_SUPPORT=y
+CONFIG_RTC_CLASS=y
+# CONFIG_RTC_DRV_MSM is not set
+CONFIG_RTC_DRV_QPNP=y
+CONFIG_SPS=y
+CONFIG_USB_BAM=y
+CONFIG_SPS_SUPPORT_BAMDMA=y
+CONFIG_SPS_SUPPORT_NDP_BAM=y
+CONFIG_QPNP_POWER_ON=y
+CONFIG_IPA=y
+CONFIG_ECM_IPA=y
+CONFIG_CORESIGHT=y
+CONFIG_CORESIGHT_TMC=y
+CONFIG_CORESIGHT_TPIU=y
+CONFIG_CORESIGHT_FUNNEL=y
+CONFIG_CORESIGHT_REPLICATOR=y
+CONFIG_CORESIGHT_STM=y
+CONFIG_CORESIGHT_ETM=y
+CONFIG_CORESIGHT_EVENT=m
+CONFIG_VFAT_FS=y
+CONFIG_TMPFS=y
+CONFIG_YAFFS_FS=y
+CONFIG_EXT3_FS=y
+CONFIG_YAFFS_DISABLE_TAGS_ECC=y
+CONFIG_NLS_CODEPAGE_437=y
+CONFIG_NLS_ASCII=y
+CONFIG_NLS_ISO8859_1=y
+CONFIG_PRINTK_TIME=y
+CONFIG_MAGIC_SYSRQ=y
+CONFIG_SCHEDSTATS=y
+CONFIG_TIMER_STATS=y
+# CONFIG_DEBUG_PREEMPT is not set
+CONFIG_DEBUG_INFO=y
+CONFIG_DEBUG_MEMORY_INIT=y
+CONFIG_ENABLE_DEFAULT_TRACERS=y
+CONFIG_DYNAMIC_DEBUG=y
+CONFIG_DEBUG_USER=y
+CONFIG_KEYS=y
+CONFIG_CRYPTO_AUTHENC=y
+CONFIG_CRYPTO_CBC=y
+CONFIG_CRYPTO_HMAC=y
+CONFIG_CRYPTO_MD4=y
+CONFIG_CRYPTO_MD5=y
+CONFIG_CRYPTO_SHA1=y
+CONFIG_CRYPTO_SHA256=y
+CONFIG_CRYPTO_AES=y
+CONFIG_CRYPTO_ARC4=y
+CONFIG_CRYPTO_DES=y
+CONFIG_CRYPTO_TWOFISH=y
+CONFIG_CRYPTO_DEFLATE=y
+CONFIG_CRYPTO_DEV_QCRYPTO=m
+CONFIG_CRYPTO_DEV_QCE=m
+CONFIG_CRYPTO_DEV_QCEDEV=m
+CONFIG_CRC_CCITT=y
+CONFIG_LIBCRC32C=y
+CONFIG_USB=y
+CONFIG_USB_ANNOUNCE_NEW_DEVICES=y
+CONFIG_USB_SUSPEND=y
+CONFIG_USB_EHCI_HCD=y
+CONFIG_USB_EHCI_EHSET=y
+CONFIG_USB_EHCI_MSM=y
+CONFIG_USB_STORAGE=y
+CONFIG_USB_STORAGE_DEBUG=y
+CONFIG_USB_STORAGE_DATAFAB=y
+CONFIG_USB_STORAGE_FREECOM=y
+CONFIG_USB_STORAGE_ISD200=y
+CONFIG_USB_STORAGE_USBAT=y
+CONFIG_USB_STORAGE_SDDR09=y
+CONFIG_USB_STORAGE_SDDR55=y
+CONFIG_USB_STORAGE_JUMPSHOT=y
+CONFIG_USB_STORAGE_ALAUDA=y
+CONFIG_USB_STORAGE_ONETOUCH=y
+CONFIG_USB_STORAGE_KARMA=y
+CONFIG_USB_STORAGE_CYPRESS_ATACB=y
+CONFIG_USB_EHSET_TEST_FIXTURE=y
+CONFIG_SCSI=y
+CONFIG_SCSI_TGT=y
+CONFIG_BLK_DEV_SD=y
+CONFIG_CHR_DEV_SG=y
+CONFIG_CHR_DEV_SCH=y
+CONFIG_SCSI_MULTI_LUN=y
+CONFIG_SCSI_CONSTANTS=y
+CONFIG_SCSI_LOGGING=y
+CONFIG_SCSI_SCAN_ASYNC=y
+CONFIG_MSM_RTB=y
+CONFIG_MSM_MEMORY_DUMP=y
diff --git a/arch/arm/configs/msm9625_defconfig b/arch/arm/configs/msm9625_defconfig
index 0f93930..aa18209 100644
--- a/arch/arm/configs/msm9625_defconfig
+++ b/arch/arm/configs/msm9625_defconfig
@@ -172,7 +172,8 @@
 CONFIG_KS8851=y
 # CONFIG_NET_VENDOR_MICROCHIP is not set
 # CONFIG_MSM_RMNET is not set
-CONFIG_MSM_RMNET_BAM=y
+# CONFIG_MSM_RMNET_BAM is not set
+CONFIG_MSM_RMNET_WWAN=y
 # CONFIG_NET_VENDOR_NATSEMI is not set
 # CONFIG_NET_VENDOR_SEEQ is not set
 # CONFIG_NET_VENDOR_SMSC is not set
@@ -247,6 +248,7 @@
 CONFIG_SPS_SUPPORT_NDP_BAM=y
 CONFIG_QPNP_POWER_ON=y
 CONFIG_IPA=y
+CONFIG_ECM_IPA=y
 CONFIG_CORESIGHT=y
 CONFIG_CORESIGHT_TMC=y
 CONFIG_CORESIGHT_TPIU=y
@@ -321,3 +323,4 @@
 CONFIG_SCSI_LOGGING=y
 CONFIG_SCSI_SCAN_ASYNC=y
 CONFIG_MSM_RTB=y
+CONFIG_MSM_MEMORY_DUMP=y
diff --git a/arch/arm/include/asm/cacheflush.h b/arch/arm/include/asm/cacheflush.h
index d021905..584fe0b 100644
--- a/arch/arm/include/asm/cacheflush.h
+++ b/arch/arm/include/asm/cacheflush.h
@@ -16,7 +16,6 @@
 #include <asm/shmparam.h>
 #include <asm/cachetype.h>
 #include <asm/outercache.h>
-#include <asm/rodata.h>
 
 #define CACHE_COLOUR(vaddr)	((vaddr & (SHMLBA - 1)) >> PAGE_SHIFT)
 
diff --git a/arch/arm/include/asm/rodata.h b/arch/arm/include/asm/rodata.h
deleted file mode 100644
index 8c8add8..0000000
--- a/arch/arm/include/asm/rodata.h
+++ /dev/null
@@ -1,32 +0,0 @@
-/*
- *  arch/arm/include/asm/rodata.h
- *
- *  Copyright (C) 2011 Google, Inc.
- *
- *  Author: Colin Cross <ccross@android.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-#ifndef _ASMARM_RODATA_H
-#define _ASMARM_RODATA_H
-
-#ifndef __ASSEMBLY__
-
-#ifdef CONFIG_DEBUG_RODATA
-
-int set_memory_rw(unsigned long virt, int numpages);
-int set_memory_ro(unsigned long virt, int numpages);
-
-void mark_rodata_ro(void);
-void set_kernel_text_rw(void);
-void set_kernel_text_ro(void);
-#else
-static inline void set_kernel_text_rw(void) { }
-static inline void set_kernel_text_ro(void) { }
-#endif
-
-#endif
-
-#endif
diff --git a/arch/arm/kernel/ftrace.c b/arch/arm/kernel/ftrace.c
index bf17145..df0bf0c 100644
--- a/arch/arm/kernel/ftrace.c
+++ b/arch/arm/kernel/ftrace.c
@@ -13,7 +13,6 @@
  */
 
 #include <linux/ftrace.h>
-#include <linux/module.h>
 #include <linux/uaccess.h>
 
 #include <asm/cacheflush.h>
@@ -64,20 +63,6 @@
 }
 #endif
 
-int ftrace_arch_code_modify_prepare(void)
-{
-	set_kernel_text_rw();
-	set_all_modules_text_rw();
-	return 0;
-}
-
-int ftrace_arch_code_modify_post_process(void)
-{
-	set_all_modules_text_ro();
-	set_kernel_text_ro();
-	return 0;
-}
-
 static unsigned long ftrace_call_replace(unsigned long pc, unsigned long addr)
 {
 	return arm_gen_branch_link(pc, addr);
diff --git a/arch/arm/mach-msm/Kconfig b/arch/arm/mach-msm/Kconfig
index e35a806..b10212e 100644
--- a/arch/arm/mach-msm/Kconfig
+++ b/arch/arm/mach-msm/Kconfig
@@ -411,6 +411,8 @@
 	select CPU_FREQ_GOV_USERSPACE
 	select CPU_FREQ_GOV_ONDEMAND
 	select MSM_PIL
+	select MSM_RUN_QUEUE_STATS
+	select ARM_HAS_SG_CHAIN
 
 config ARCH_MSM8226
 	bool "MSM8226"
@@ -437,6 +439,7 @@
 	select MEMORY_HOLE_CARVEOUT
 	select DONT_MAP_HOLE_AFTER_MEMBANK0
 	select MSM_BUS_SCALING
+	select ARM_HAS_SG_CHAIN
 endmenu
 
 choice
@@ -2312,7 +2315,6 @@
 
 config MSM_DLOAD_MODE
 	bool "Enable download mode on crashes"
-	depends on ARCH_MSM8X60 || ARCH_MSM8960 || ARCH_MSM9615 || ARCH_MSM8974 || ARCH_MSM9625
 	default n
 	help
 		This makes the SoC enter download mode when it resets
diff --git a/arch/arm/mach-msm/Makefile.boot b/arch/arm/mach-msm/Makefile.boot
index 202b8dd..02d0b46 100644
--- a/arch/arm/mach-msm/Makefile.boot
+++ b/arch/arm/mach-msm/Makefile.boot
@@ -74,6 +74,9 @@
 # MSM8226
    zreladdr-$(CONFIG_ARCH_MSM8226)	:= 0x00008000
         dtb-$(CONFIG_ARCH_MSM8226)	+= msm8226-sim.dtb
+        dtb-$(CONFIG_ARCH_MSM8226)	+= msm8226-cdp.dtb
+        dtb-$(CONFIG_ARCH_MSM8226)	+= msm8226-mtp.dtb
+        dtb-$(CONFIG_ARCH_MSM8226)	+= msm8226-qrd.dtb
 
 # FSM9XXX
    zreladdr-$(CONFIG_ARCH_FSM9XXX)	:= 0x10008000
diff --git a/arch/arm/mach-msm/acpuclock-8226.c b/arch/arm/mach-msm/acpuclock-8226.c
index 7dc3a0e..8ba1b39 100644
--- a/arch/arm/mach-msm/acpuclock-8226.c
+++ b/arch/arm/mach-msm/acpuclock-8226.c
@@ -53,13 +53,13 @@
  * 3) Depending on Frodo version, may need minimum of LVL_NOM
  */
 static struct clkctl_acpu_speed acpu_freq_tbl[] = {
-	{ 0,   19200, CXO,     0, 0,   LVL_LOW,    950000, 0 },
-	{ 1,  300000, PLL0,    4, 2,   LVL_LOW,    950000, 4 },
-	{ 1,  384000, ACPUPLL, 5, 0,   LVL_LOW,    950000, 4 },
-	{ 1,  600000, PLL0,    4, 0,   LVL_NOM,    950000, 6 },
-	{ 1,  787200, ACPUPLL, 5, 0,   LVL_NOM,   1050000, 6 },
-	{ 1,  998400, ACPUPLL, 5, 0,   LVL_HIGH,  1050000, 7 },
-	{ 1, 1190400, ACPUPLL, 5, 0,   LVL_HIGH,  1050000, 7 },
+	{ 0,   19200, CXO,     0, 0,   1150000,   1150000, 0 },
+	{ 1,  300000, PLL0,    4, 2,   1150000,   1150000, 4 },
+	{ 1,  384000, ACPUPLL, 5, 0,   1150000,   1150000, 4 },
+	{ 1,  600000, PLL0,    4, 0,   1150000,   1150000, 6 },
+	{ 1,  787200, ACPUPLL, 5, 0,   1150000,   1150000, 6 },
+	{ 0,  998400, ACPUPLL, 5, 0,   1150000,   1150000, 7 },
+	{ 0, 1190400, ACPUPLL, 5, 0,   1150000,   1150000, 7 },
 	{ 0 }
 };
 
@@ -68,7 +68,7 @@
 	.current_speed = &(struct clkctl_acpu_speed){ 0 },
 	.bus_scale = &bus_client_pdata,
 	/* FIXME regulator doesn't support corners yet */
-	.vdd_max_cpu = 1050000,
+	.vdd_max_cpu = 1150000,
 	.vdd_max_mem = 1150000,
 	.src_clocks = {
 		[PLL0].name = "gpll0",
diff --git a/arch/arm/mach-msm/acpuclock-8974.c b/arch/arm/mach-msm/acpuclock-8974.c
index d8f5425..5211c6e 100644
--- a/arch/arm/mach-msm/acpuclock-8974.c
+++ b/arch/arm/mach-msm/acpuclock-8974.c
@@ -266,11 +266,17 @@
 	[0][2] = { acpu_freq_tbl_pvs2, sizeof(acpu_freq_tbl_pvs2) },
 	[0][3] = { acpu_freq_tbl_pvs3, sizeof(acpu_freq_tbl_pvs3) },
 	[0][4] = { acpu_freq_tbl_pvs4, sizeof(acpu_freq_tbl_pvs4) },
+	[0][5] = { acpu_freq_tbl_pvs4, sizeof(acpu_freq_tbl_pvs4) },
+	[0][6] = { acpu_freq_tbl_pvs4, sizeof(acpu_freq_tbl_pvs4) },
+	[0][7] = { acpu_freq_tbl_pvs4, sizeof(acpu_freq_tbl_pvs4) },
 	[1][0] = { acpu_freq_tbl_pvs0, sizeof(acpu_freq_tbl_pvs0) },
 	[1][1] = { acpu_freq_tbl_pvs1, sizeof(acpu_freq_tbl_pvs1) },
 	[1][2] = { acpu_freq_tbl_pvs2, sizeof(acpu_freq_tbl_pvs2) },
 	[1][3] = { acpu_freq_tbl_pvs3, sizeof(acpu_freq_tbl_pvs3) },
 	[1][4] = { acpu_freq_tbl_pvs4, sizeof(acpu_freq_tbl_pvs4) },
+	[1][5] = { acpu_freq_tbl_pvs4, sizeof(acpu_freq_tbl_pvs4) },
+	[1][6] = { acpu_freq_tbl_pvs4, sizeof(acpu_freq_tbl_pvs4) },
+	[1][7] = { acpu_freq_tbl_pvs4, sizeof(acpu_freq_tbl_pvs4) },
 };
 
 static struct acpuclk_krait_params acpuclk_8974_params __initdata = {
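The hunk above adds rows so that PVS bins 5 through 7 resolve to the same frequency table as the fastest characterized bin (pvs4); the NUM_PVS bump to 8 in acpuclock-krait.h just below keeps the lookup bounds in step. A minimal stand-alone sketch of that bin-to-table mapping, with stand-in names rather than the driver's types:

#include <stdio.h>

/* Illustrative mapping only; names and table contents are stand-ins. */
#define NUM_PVS 8

static const char *pvs_tbl[NUM_PVS] = {
	"pvs0", "pvs1", "pvs2", "pvs3", "pvs4",
	"pvs4", "pvs4", "pvs4",	/* bins 5-7 reuse the fastest table */
};

int main(void)
{
	unsigned int bin = 6;	/* hypothetical fused PVS value */

	printf("PVS bin %u uses the %s frequency table\n", bin, pvs_tbl[bin]);
	return 0;
}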
diff --git a/arch/arm/mach-msm/acpuclock-cortex.c b/arch/arm/mach-msm/acpuclock-cortex.c
index 64e31ba..febf95a 100644
--- a/arch/arm/mach-msm/acpuclock-cortex.c
+++ b/arch/arm/mach-msm/acpuclock-cortex.c
@@ -324,13 +324,13 @@
 	}
 
 	/* Improve boot time by ramping up CPU immediately */
-	for (i = 0; acpuclk_init_data->freq_tbl[i].khz != 0 &&
-			acpuclk_init_data->freq_tbl[i].use_for_scaling; i++)
-		max_cpu_khz = acpuclk_init_data->freq_tbl[i].khz;
+	for (i = 0; acpuclk_init_data->freq_tbl[i].khz != 0; i++)
+		if (acpuclk_init_data->freq_tbl[i].use_for_scaling)
+			max_cpu_khz = acpuclk_init_data->freq_tbl[i].khz;
 
 	/* Initialize regulators */
-	rc = increase_vdd(acpuclk_init_data->freq_tbl[i].vdd_cpu,
-		acpuclk_init_data->freq_tbl[i].vdd_mem);
+	rc = increase_vdd(acpuclk_init_data->vdd_max_cpu,
+		acpuclk_init_data->vdd_max_mem);
 	if (rc)
 		goto err_vdd;
 
@@ -346,6 +346,12 @@
 		goto err_vdd_cpu;
 	}
 
+	/*
+	 * Select a state which is always a valid transition to align SW with
+	 * the HW configuration set by the bootloaders.
+	 */
+	acpuclk_cortex_set_rate(0, acpuclk_cortex_data.power_collapse_khz,
+		SETRATE_INIT);
 	acpuclk_cortex_set_rate(0, max_cpu_khz, SETRATE_INIT);
 
 	acpuclk_register(&acpuclk_cortex_data);
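The hunks above correct the boot-time frequency selection: the old loop stopped at the first row not marked use_for_scaling and then used the post-loop index to read vdd_cpu/vdd_mem (possibly from the table terminator), while the new code scans the whole khz-terminated table and votes the platform's vdd_max_cpu/vdd_max_mem instead. A minimal stand-alone sketch of the corrected scan, assuming a khz == 0 terminator; the struct and values are illustrative, not the driver's:

#include <stdio.h>

struct freq_entry {	/* illustrative stand-in, not the driver's struct */
	unsigned int use_for_scaling;
	unsigned long khz;
};

/* Walk the whole khz == 0 terminated table, skipping non-scaling rows. */
static unsigned long pick_max_scalable_khz(const struct freq_entry *tbl)
{
	unsigned long max_khz = 0;
	int i;

	for (i = 0; tbl[i].khz != 0; i++)
		if (tbl[i].use_for_scaling)
			max_khz = tbl[i].khz;
	return max_khz;
}

int main(void)
{
	const struct freq_entry tbl[] = {
		{ 0,   19200 },		/* non-scaling row at the start */
		{ 1,  300000 },
		{ 1,  787200 },
		{ 0,       0 },		/* terminator */
	};

	/* The old combined loop condition exited at row 0 and never set a rate. */
	printf("max scalable rate: %lu kHz\n", pick_max_scalable_khz(tbl));
	return 0;
}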
diff --git a/arch/arm/mach-msm/acpuclock-krait.h b/arch/arm/mach-msm/acpuclock-krait.h
index 00f64fc..11d58dd 100644
--- a/arch/arm/mach-msm/acpuclock-krait.h
+++ b/arch/arm/mach-msm/acpuclock-krait.h
@@ -50,7 +50,7 @@
 	PVS_NOMINAL = 1,
 	PVS_FAST = 3,
 	PVS_FASTER = 4,
-	NUM_PVS = 7
+	NUM_PVS = 8
 };
 
 /**
diff --git a/arch/arm/mach-msm/bam_dmux.c b/arch/arm/mach-msm/bam_dmux.c
index cf2f464..833b213 100644
--- a/arch/arm/mach-msm/bam_dmux.c
+++ b/arch/arm/mach-msm/bam_dmux.c
@@ -302,81 +302,56 @@
 #define bam_ch_is_in_reset(x)			\
 	(bam_ch[(x)].status & BAM_CH_IN_RESET)
 
-#define LOG_MESSAGE_MAX_SIZE 80
 struct kfifo bam_dmux_state_log;
-static uint32_t bam_dmux_state_logging_disabled;
 static int bam_dmux_uplink_vote;
 static int bam_dmux_power_state;
 
-static void bam_dmux_log(const char *fmt, ...)
-					__printf(1, 2);
-
-
-#define DMUX_LOG_KERR(fmt...) \
-do { \
-	bam_dmux_log(fmt); \
-	pr_err(fmt); \
-} while (0)
-
 static void *bam_ipc_log_txt;
 
 #define BAM_IPC_LOG_PAGES 5
 
 /**
  * Log a state change along with a small message.
- *
  * Complete size of message is limited to @todo.
+ * Logging is done using IPC Logging infrastructure.
+ *
+ * States
+ * D: 1 = Power collapse disabled
+ * R: 1 = in global reset
+ * P: 1 = BAM is powered up
+ * A: 1 = BAM initialized and ready for data
+ * V: 1 = Uplink vote for power
+ * U: 1 = Uplink active
+ * W: 1 = Uplink Wait-for-ack
+ * A: 1 = Uplink ACK received
+ * #: >=1 On-demand uplink vote
+ * D: 1 = Disconnect ACK active
  */
-static void bam_dmux_log(const char *fmt, ...)
-{
-	char buff[LOG_MESSAGE_MAX_SIZE];
-	va_list arg_list;
-	unsigned long long t_now;
-	unsigned long nanosec_rem;
-	int len = 0;
 
-	if (bam_dmux_state_logging_disabled)
-		return;
+#define BAM_DMUX_LOG(fmt, args...) \
+do { \
+	if (bam_ipc_log_txt) { \
+		ipc_log_string(bam_ipc_log_txt, \
+		"<DMUX> %c%c%c%c %c%c%c%c%d%c " fmt, \
+		a2_pc_disabled ? 'D' : 'd', \
+		in_global_reset ? 'R' : 'r', \
+		bam_dmux_power_state ? 'P' : 'p', \
+		bam_connection_is_active ? 'A' : 'a', \
+		bam_dmux_uplink_vote ? 'V' : 'v', \
+		bam_is_connected ?  'U' : 'u', \
+		wait_for_ack ? 'W' : 'w', \
+		ul_wakeup_ack_completion.done ? 'A' : 'a', \
+		atomic_read(&ul_ondemand_vote), \
+		disconnect_ack ? 'D' : 'd', \
+		args); \
+	} \
+} while (0)
 
-	t_now = sched_clock();
-	nanosec_rem = do_div(t_now, 1000000000U);
-
-	/*
-	 * States
-	 * D: 1 = Power collapse disabled
-	 * R: 1 = in global reset
-	 * P: 1 = BAM is powered up
-	 * A: 1 = BAM initialized and ready for data
-	 *
-	 * V: 1 = Uplink vote for power
-	 * U: 1 = Uplink active
-	 * W: 1 = Uplink Wait-for-ack
-	 * A: 1 = Uplink ACK received
-	 * #: >=1 On-demand uplink vote
-	 * D: 1 = Disconnect ACK active
-	 */
-	len += scnprintf(buff, sizeof(buff),
-		"<DMUX> %u.%09lu %c%c%c%c %c%c%c%c%d%c ",
-		(unsigned)t_now, nanosec_rem,
-		a2_pc_disabled ? 'D' : 'd',
-		in_global_reset ? 'R' : 'r',
-		bam_dmux_power_state ? 'P' : 'p',
-		bam_connection_is_active ? 'A' : 'a',
-		bam_dmux_uplink_vote ? 'V' : 'v',
-		bam_is_connected ?  'U' : 'u',
-		wait_for_ack ? 'W' : 'w',
-		ul_wakeup_ack_completion.done ? 'A' : 'a',
-		atomic_read(&ul_ondemand_vote),
-		disconnect_ack ? 'D' : 'd'
-		);
-
-	va_start(arg_list, fmt);
-	len += vscnprintf(buff + len, sizeof(buff) - len, fmt, arg_list);
-	va_end(arg_list);
-	memset(buff + len, 0x0, sizeof(buff) - len);
-	if (bam_ipc_log_txt)
-		ipc_log_string(bam_ipc_log_txt, buff);
-}
+#define DMUX_LOG_KERR(fmt, args...) \
+do { \
+	BAM_DMUX_LOG(fmt, args); \
+	pr_err(fmt, args); \
+} while (0)
 
 static inline void set_tx_timestamp(struct tx_pkt_info *pkt)
 {
@@ -396,12 +371,12 @@
 	spin_lock_irqsave(&bam_tx_pool_spinlock, flags);
 	list_for_each_entry(info, &bam_tx_pool, list_node) {
 		if (!reported) {
-			bam_dmux_log("%s: tx pool not empty\n", func);
+			BAM_DMUX_LOG("%s: tx pool not empty\n", func);
 			if (!in_global_reset)
 				pr_err("%s: tx pool not empty\n", func);
 			reported = 1;
 		}
-		bam_dmux_log("%s: node=%p ts=%u.%09lu\n", __func__,
+		BAM_DMUX_LOG("%s: node=%p ts=%u.%09lu\n", __func__,
 			&info->list_node, info->ts_sec, info->ts_nsec);
 		if (!in_global_reset)
 			pr_err("%s: node=%p ts=%u.%09lu\n", __func__,
@@ -529,7 +504,7 @@
 
 	mutex_lock(&bam_pdev_mutexlock);
 	if (in_global_reset) {
-		bam_dmux_log("%s: open cid %d aborted due to ssr\n",
+		BAM_DMUX_LOG("%s: open cid %d aborted due to ssr\n",
 				__func__, rx_hdr->ch_id);
 		mutex_unlock(&bam_pdev_mutexlock);
 		queue_rx();
@@ -593,18 +568,18 @@
 		bam_mux_process_data(rx_skb);
 		break;
 	case BAM_MUX_HDR_CMD_OPEN:
-		bam_dmux_log("%s: opening cid %d PC enabled\n", __func__,
+		BAM_DMUX_LOG("%s: opening cid %d PC enabled\n", __func__,
 				rx_hdr->ch_id);
 		handle_bam_mux_cmd_open(rx_hdr);
 		if (!(rx_hdr->reserved & ENABLE_DISCONNECT_ACK)) {
-			bam_dmux_log("%s: deactivating disconnect ack\n",
+			BAM_DMUX_LOG("%s: deactivating disconnect ack\n",
 								__func__);
 			disconnect_ack = 0;
 		}
 		dev_kfree_skb_any(rx_skb);
 		break;
 	case BAM_MUX_HDR_CMD_OPEN_NO_A2_PC:
-		bam_dmux_log("%s: opening cid %d PC disabled\n", __func__,
+		BAM_DMUX_LOG("%s: opening cid %d PC disabled\n", __func__,
 				rx_hdr->ch_id);
 
 		if (!a2_pc_disabled) {
@@ -617,11 +592,11 @@
 		break;
 	case BAM_MUX_HDR_CMD_CLOSE:
 		/* probably should drop pending write */
-		bam_dmux_log("%s: closing cid %d\n", __func__,
+		BAM_DMUX_LOG("%s: closing cid %d\n", __func__,
 				rx_hdr->ch_id);
 		mutex_lock(&bam_pdev_mutexlock);
 		if (in_global_reset) {
-			bam_dmux_log("%s: close cid %d aborted due to ssr\n",
+			BAM_DMUX_LOG("%s: close cid %d aborted due to ssr\n",
 					__func__, rx_hdr->ch_id);
 			mutex_unlock(&bam_pdev_mutexlock);
 			break;
@@ -1412,7 +1387,7 @@
 	for (i = 0; i < BAM_DMUX_NUM_CHANNELS; ++i) {
 		if (bam_ch_is_open(i)) {
 			bam_ch[i].notify(bam_ch[i].priv, event, data);
-			bam_dmux_log("%s: cid=%d, event=%d, data=%lu\n",
+			BAM_DMUX_LOG("%s: cid=%d, event=%d, data=%lu\n",
 					__func__, i, event, data);
 		}
 	}
@@ -1455,11 +1430,11 @@
 
 static void power_vote(int vote)
 {
-	bam_dmux_log("%s: curr=%d, vote=%d\n", __func__,
+	BAM_DMUX_LOG("%s: curr=%d, vote=%d\n", __func__,
 			bam_dmux_uplink_vote, vote);
 
 	if (bam_dmux_uplink_vote == vote)
-		bam_dmux_log("%s: warning - duplicate power vote\n", __func__);
+		BAM_DMUX_LOG("%s: warning - duplicate power vote\n", __func__);
 
 	bam_dmux_uplink_vote = vote;
 	if (vote)
@@ -1473,7 +1448,7 @@
  */
 static inline void ul_powerdown(void)
 {
-	bam_dmux_log("%s: powerdown\n", __func__);
+	BAM_DMUX_LOG("%s: powerdown\n", __func__);
 	verify_tx_queue_is_empty(__func__);
 
 	if (a2_pc_disabled) {
@@ -1585,7 +1560,7 @@
 		}
 
 		if (ul_packet_written || atomic_read(&ul_ondemand_vote)) {
-			bam_dmux_log("%s: pkt written %d\n",
+			BAM_DMUX_LOG("%s: pkt written %d\n",
 				__func__, ul_packet_written);
 			ul_packet_written = 0;
 			schedule_delayed_work(&ul_timeout_work,
@@ -1614,7 +1589,7 @@
 
 	mutex_lock(&wakeup_lock);
 	if (bam_is_connected) { /* bam got connected before lock grabbed */
-		bam_dmux_log("%s Already awake\n", __func__);
+		BAM_DMUX_LOG("%s Already awake\n", __func__);
 		mutex_unlock(&wakeup_lock);
 		return;
 	}
@@ -1677,35 +1652,35 @@
 	 * instead of waiting
 	 */
 	if (wait_for_ack) {
-		bam_dmux_log("%s waiting for previous ack\n", __func__);
+		BAM_DMUX_LOG("%s waiting for previous ack\n", __func__);
 		ret = wait_for_completion_timeout(
 					&ul_wakeup_ack_completion, HZ);
 		wait_for_ack = 0;
 		if (unlikely(ret == 0) && ssrestart_check()) {
 			mutex_unlock(&wakeup_lock);
-			bam_dmux_log("%s timeout previous ack\n", __func__);
+			BAM_DMUX_LOG("%s timeout previous ack\n", __func__);
 			return;
 		}
 	}
 	INIT_COMPLETION(ul_wakeup_ack_completion);
 	power_vote(1);
-	bam_dmux_log("%s waiting for wakeup ack\n", __func__);
+	BAM_DMUX_LOG("%s waiting for wakeup ack\n", __func__);
 	ret = wait_for_completion_timeout(&ul_wakeup_ack_completion, HZ);
 	if (unlikely(ret == 0) && ssrestart_check()) {
 		mutex_unlock(&wakeup_lock);
-		bam_dmux_log("%s timeout wakeup ack\n", __func__);
+		BAM_DMUX_LOG("%s timeout wakeup ack\n", __func__);
 		return;
 	}
-	bam_dmux_log("%s waiting completion\n", __func__);
+	BAM_DMUX_LOG("%s waiting completion\n", __func__);
 	ret = wait_for_completion_timeout(&bam_connection_completion, HZ);
 	if (unlikely(ret == 0) && ssrestart_check()) {
 		mutex_unlock(&wakeup_lock);
-		bam_dmux_log("%s timeout power on\n", __func__);
+		BAM_DMUX_LOG("%s timeout power on\n", __func__);
 		return;
 	}
 
 	bam_is_connected = 1;
-	bam_dmux_log("%s complete\n", __func__);
+	BAM_DMUX_LOG("%s complete\n", __func__);
 	schedule_delayed_work(&ul_timeout_work,
 				msecs_to_jiffies(UL_TIMEOUT_DELAY));
 	mutex_unlock(&wakeup_lock);
@@ -1771,7 +1746,7 @@
 	/* handle disconnect during active UL */
 	write_lock_irqsave(&ul_wakeup_lock, flags);
 	if (bam_is_connected) {
-		bam_dmux_log("%s: UL active - forcing powerdown\n", __func__);
+		BAM_DMUX_LOG("%s: UL active - forcing powerdown\n", __func__);
 		ul_powerdown();
 	}
 	write_unlock_irqrestore(&ul_wakeup_lock, flags);
@@ -1817,10 +1792,10 @@
 {
 	int rc;
 
-	bam_dmux_log("%s\n", __func__);
+	BAM_DMUX_LOG("%s\n", __func__);
 	mutex_lock(&dfab_status_lock);
 	if (dfab_is_on) {
-		bam_dmux_log("%s: dfab is already on\n", __func__);
+		BAM_DMUX_LOG("%s: dfab is already on\n", __func__);
 		mutex_unlock(&dfab_status_lock);
 		return;
 	}
@@ -1842,7 +1817,7 @@
 
 static void unvote_dfab(void)
 {
-	bam_dmux_log("%s\n", __func__);
+	BAM_DMUX_LOG("%s\n", __func__);
 	mutex_lock(&dfab_status_lock);
 	if (!dfab_is_on) {
 		DMUX_LOG_KERR("%s: dfab is already off\n", __func__);
@@ -1864,7 +1839,7 @@
 	unsigned long flags;
 
 	spin_lock_irqsave(&wakelock_reference_lock, flags);
-	bam_dmux_log("%s: ref count = %d\n", __func__,
+	BAM_DMUX_LOG("%s: ref count = %d\n", __func__,
 						wakelock_reference_count);
 	if (wakelock_reference_count == 0)
 		wake_lock(&bam_wakelock);
@@ -1883,7 +1858,7 @@
 		spin_unlock_irqrestore(&wakelock_reference_lock, flags);
 		return;
 	}
-	bam_dmux_log("%s: ref count = %d\n", __func__,
+	BAM_DMUX_LOG("%s: ref count = %d\n", __func__,
 						wakelock_reference_count);
 	--wakelock_reference_count;
 	if (wakelock_reference_count == 0)
@@ -1914,14 +1889,15 @@
 	 * processing.  We do not want to access the bam hardware during SSR
 	 * because a watchdog crash from a bus stall would likely occur.
 	 */
-	if (code == SUBSYS_BEFORE_SHUTDOWN)
+	if (code == SUBSYS_BEFORE_SHUTDOWN) {
+		in_global_reset = 1;
 		in_ssr = 1;
+		BAM_DMUX_LOG("%s: begin\n", __func__);
+		flush_workqueue(bam_mux_rx_workqueue);
+	}
 	if (code != SUBSYS_AFTER_SHUTDOWN)
 		return NOTIFY_DONE;
 
-	bam_dmux_log("%s: begin\n", __func__);
-	in_global_reset = 1;
-
 	/* Handle uplink Powerdown */
 	write_lock_irqsave(&ul_wakeup_lock, flags);
 	if (bam_is_connected) {
@@ -1977,7 +1953,7 @@
 	}
 	spin_unlock_irqrestore(&bam_tx_pool_spinlock, flags);
 
-	bam_dmux_log("%s: complete\n", __func__);
+	BAM_DMUX_LOG("%s: complete\n", __func__);
 	return NOTIFY_DONE;
 }
 
@@ -2224,7 +2200,7 @@
 {
 	static unsigned int clear_bit; /* 0 = set the bit, else clear bit */
 
-	bam_dmux_log("%s: apps ack %d->%d\n", __func__,
+	BAM_DMUX_LOG("%s: apps ack %d->%d\n", __func__,
 			clear_bit & 0x1, ~clear_bit & 0x1);
 	smsm_change_state(SMSM_APPS_STATE,
 				clear_bit & SMSM_A2_POWER_CONTROL_ACK,
@@ -2240,10 +2216,10 @@
 	mutex_lock(&smsm_cb_lock);
 	bam_dmux_power_state = new_state & SMSM_A2_POWER_CONTROL ? 1 : 0;
 	DBG_INC_A2_POWER_CONTROL_IN_CNT();
-	bam_dmux_log("%s: 0x%08x -> 0x%08x\n", __func__, old_state,
+	BAM_DMUX_LOG("%s: 0x%08x -> 0x%08x\n", __func__, old_state,
 			new_state);
 	if (last_processed_state == (new_state & SMSM_A2_POWER_CONTROL)) {
-		bam_dmux_log("%s: already processed this state\n", __func__);
+		BAM_DMUX_LOG("%s: already processed this state\n", __func__);
 		mutex_unlock(&smsm_cb_lock);
 		return;
 	}
@@ -2251,23 +2227,23 @@
 	last_processed_state = new_state & SMSM_A2_POWER_CONTROL;
 
 	if (bam_mux_initialized && new_state & SMSM_A2_POWER_CONTROL) {
-		bam_dmux_log("%s: reconnect\n", __func__);
+		BAM_DMUX_LOG("%s: reconnect\n", __func__);
 		grab_wakelock();
 		reconnect_to_bam();
 	} else if (bam_mux_initialized &&
 					!(new_state & SMSM_A2_POWER_CONTROL)) {
-		bam_dmux_log("%s: disconnect\n", __func__);
+		BAM_DMUX_LOG("%s: disconnect\n", __func__);
 		disconnect_to_bam();
 		release_wakelock();
 	} else if (new_state & SMSM_A2_POWER_CONTROL) {
-		bam_dmux_log("%s: init\n", __func__);
+		BAM_DMUX_LOG("%s: init\n", __func__);
 		grab_wakelock();
 		if (cpu_is_msm9615())
 			msm9615_bam_init();
 		else
 			bam_init();
 	} else {
-		bam_dmux_log("%s: bad state change\n", __func__);
+		BAM_DMUX_LOG("%s: bad state change\n", __func__);
 		pr_err("%s: unsupported state change\n", __func__);
 	}
 	mutex_unlock(&smsm_cb_lock);
@@ -2278,7 +2254,7 @@
 						uint32_t new_state)
 {
 	DBG_INC_ACK_IN_CNT();
-	bam_dmux_log("%s: 0x%08x -> 0x%08x\n", __func__, old_state,
+	BAM_DMUX_LOG("%s: 0x%08x -> 0x%08x\n", __func__, old_state,
 			new_state);
 	complete_all(&ul_wakeup_ack_completion);
 }
@@ -2321,12 +2297,12 @@
 
 	xo_clk = clk_get(&pdev->dev, "xo");
 	if (IS_ERR(xo_clk)) {
-		bam_dmux_log("%s: did not get xo clock\n", __func__);
+		BAM_DMUX_LOG("%s: did not get xo clock\n", __func__);
 		xo_clk = NULL;
 	}
 	dfab_clk = clk_get(&pdev->dev, "bus_clk");
 	if (IS_ERR(dfab_clk)) {
-		bam_dmux_log("%s: did not get dfab clock\n", __func__);
+		BAM_DMUX_LOG("%s: did not get dfab clock\n", __func__);
 		dfab_clk = NULL;
 	} else {
 		rc = clk_set_rate(dfab_clk, 64000000);
@@ -2433,7 +2409,6 @@
 	bam_ipc_log_txt = ipc_log_context_create(BAM_IPC_LOG_PAGES, "bam_dmux");
 	if (!bam_ipc_log_txt) {
 		pr_err("%s : unable to create IPC Logging Context", __func__);
-		bam_dmux_state_logging_disabled = 1;
 	}
 
 	rx_timer_interval = DEFAULT_POLLING_MIN_SLEEP;
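Since the new BAM_DMUX_LOG() prefix packs the driver state into one-letter codes, the stand-alone toy below reproduces the same format with plain local flags so a captured line can be decoded against the legend in the comment hunk above; the flag values and the ul_wakeup message are made up for illustration.

#include <stdio.h>

/*
 * Toy reproduction of the BAM_DMUX_LOG() prefix, for reading IPC log lines.
 * Upper case means the flag is set, lower case means it is clear; the digit
 * is the on-demand uplink vote count. All values below are hypothetical.
 */
int main(void)
{
	int a2_pc_disabled = 0, in_global_reset = 1, bam_dmux_power_state = 0;
	int bam_connection_is_active = 0, bam_dmux_uplink_vote = 0;
	int bam_is_connected = 0, wait_for_ack = 0, ack_done = 0;
	int ul_ondemand_vote = 0, disconnect_ack = 0;

	/* Prints: <DMUX> dRpa vuwa0d ul_wakeup timeout wakeup ack */
	printf("<DMUX> %c%c%c%c %c%c%c%c%d%c %s timeout wakeup ack\n",
	       a2_pc_disabled ? 'D' : 'd',
	       in_global_reset ? 'R' : 'r',
	       bam_dmux_power_state ? 'P' : 'p',
	       bam_connection_is_active ? 'A' : 'a',
	       bam_dmux_uplink_vote ? 'V' : 'v',
	       bam_is_connected ? 'U' : 'u',
	       wait_for_ack ? 'W' : 'w',
	       ack_done ? 'A' : 'a',
	       ul_ondemand_vote,
	       disconnect_ack ? 'D' : 'd',
	       "ul_wakeup");
	return 0;
}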
diff --git a/arch/arm/mach-msm/board-8064-gpiomux.c b/arch/arm/mach-msm/board-8064-gpiomux.c
index 0dee8f5..0f88287 100644
--- a/arch/arm/mach-msm/board-8064-gpiomux.c
+++ b/arch/arm/mach-msm/board-8064-gpiomux.c
@@ -1729,13 +1729,6 @@
 	},
 };
 
-static struct gpiomux_setting fsm8064_ep_sync_drsync_cfg = {
-	.func = GPIOMUX_FUNC_GPIO,
-	.drv = GPIOMUX_DRV_2MA,
-	.pull = GPIOMUX_PULL_UP,
-	.dir = GPIOMUX_OUT_HIGH,
-};
-
 static struct gpiomux_setting fsm8064_ep_sync_input_cfg = {
 	.func = GPIOMUX_FUNC_GPIO,
 	.drv = GPIOMUX_DRV_4MA,
@@ -1746,7 +1739,7 @@
 	{
 		.gpio      = 6,		/* GPSPPSIN_DRSYNC */
 		.settings = {
-			[GPIOMUX_SUSPENDED] = &fsm8064_ep_sync_drsync_cfg,
+			[GPIOMUX_SUSPENDED] = &fsm8064_ep_sync_input_cfg,
 		},
 	},
 	{
diff --git a/arch/arm/mach-msm/board-8064-pmic.c b/arch/arm/mach-msm/board-8064-pmic.c
index a1ff607..a1ed251 100644
--- a/arch/arm/mach-msm/board-8064-pmic.c
+++ b/arch/arm/mach-msm/board-8064-pmic.c
@@ -137,7 +137,7 @@
 	PM8921_GPIO_OUTPUT_VIN(14, 1, PM_GPIO_VIN_VPH),
 	/* PPS_SRC_SEL_N, chooses between WGR7640 PPS source (high) or
 	 * CW GPS module PPS source (low) */
-	PM8921_GPIO_OUTPUT_VIN(19, 1, PM_GPIO_VIN_VPH),	/* PPS_SRC_SEL_N */
+	PM8921_GPIO_OUTPUT_VIN(19, 0, PM_GPIO_VIN_VPH),	/* PPS_SRC_SEL_N */
 
 	PM8921_GPIO_OUTPUT_VIN(13, 1, PM_GPIO_VIN_VPH),	/* PCIE_CLK_PWR_EN */
 	PM8921_GPIO_OUTPUT_VIN(37, 1, PM_GPIO_VIN_VPH),	/* PCIE_RST_N */
@@ -557,4 +557,7 @@
 
 	if (!machine_is_apq8064_mtp() && !machine_is_apq8064_liquid())
 		apq8064_pm8921_chg_pdata.battery_less_hardware = 1;
+
+	if (machine_is_mpq8064_hrd())
+		apq8064_pm8921_chg_pdata.disable_chg_rmvl_wrkarnd = 1;
 }
diff --git a/arch/arm/mach-msm/board-8064.c b/arch/arm/mach-msm/board-8064.c
index beb064b..f3d648e 100644
--- a/arch/arm/mach-msm/board-8064.c
+++ b/arch/arm/mach-msm/board-8064.c
@@ -2887,6 +2887,7 @@
 #ifdef CONFIG_MSM_ROTATOR
 	&msm_rotator_device,
 #endif
+	&msm8064_cpu_slp_status,
 };
 
 static struct platform_device
@@ -4004,6 +4005,7 @@
 	.init_early = apq8064_allocate_memory_regions,
 	.init_very_early = apq8064_early_reserve,
 	.restart = msm_restart,
+	.smp = &msm8960_smp_ops,
 MACHINE_END
 
 MACHINE_START(APQ8064_MTP, "QCT APQ8064 MTP")
diff --git a/arch/arm/mach-msm/board-8226-gpiomux.c b/arch/arm/mach-msm/board-8226-gpiomux.c
index e58cee7..8be5525 100644
--- a/arch/arm/mach-msm/board-8226-gpiomux.c
+++ b/arch/arm/mach-msm/board-8226-gpiomux.c
@@ -42,6 +42,12 @@
 	.pull = GPIOMUX_PULL_NONE,
 };
 
+static struct gpiomux_setting gpio_spi_cs_config = {
+	.func = GPIOMUX_FUNC_1,
+	.drv = GPIOMUX_DRV_6MA,
+	.pull = GPIOMUX_PULL_DOWN,
+};
+
 static struct gpiomux_setting gpio_i2c_config = {
 	.func = GPIOMUX_FUNC_3,
 	.drv = GPIOMUX_DRV_2MA,
@@ -50,37 +56,37 @@
 
 static struct msm_gpiomux_config msm_blsp_configs[] __initdata = {
 	{
-		.gpio      = 0,		/* BLSP1 QUP2 SPI_DATA_MOSI */
+		.gpio      = 0,		/* BLSP1 QUP1 SPI_DATA_MOSI */
 		.settings = {
 			[GPIOMUX_SUSPENDED] = &gpio_spi_config,
 		},
 	},
 	{
-		.gpio      = 1,		/* BLSP1 QUP2 SPI_DATA_MISO */
+		.gpio      = 1,		/* BLSP1 QUP1 SPI_DATA_MISO */
 		.settings = {
 			[GPIOMUX_SUSPENDED] = &gpio_spi_config,
 		},
 	},
 	{
-		.gpio      = 2,		/* BLSP1 QUP2 SPI_CS_N */
+		.gpio      = 2,		/* BLSP1 QUP1 SPI_CS1 */
+		.settings = {
+			[GPIOMUX_SUSPENDED] = &gpio_spi_cs_config,
+		},
+	},
+	{
+		.gpio      = 3,		/* BLSP1 QUP1 SPI_CLK */
 		.settings = {
 			[GPIOMUX_SUSPENDED] = &gpio_spi_config,
 		},
 	},
 	{
-		.gpio      = 3,		/* BLSP1 QUP2 SPI_CLK */
-		.settings = {
-			[GPIOMUX_SUSPENDED] = &gpio_spi_config,
-		},
-	},
-	{
-		.gpio      = 14,		/* BLSP-1 QUP-4 I2C_SDA */
+		.gpio      = 14,	/* BLSP1 QUP4 I2C_SDA */
 		.settings = {
 			[GPIOMUX_SUSPENDED] = &gpio_i2c_config,
 		},
 	},
 	{
-		.gpio      = 15,		/* BLSP-1 QUP-4 I2C_SCL */
+		.gpio      = 15,	/* BLSP1 QUP4 I2C_SCL */
 		.settings = {
 			[GPIOMUX_SUSPENDED] = &gpio_i2c_config,
 		},
diff --git a/arch/arm/mach-msm/board-8226.c b/arch/arm/mach-msm/board-8226.c
index 2b331d0..79ab428 100644
--- a/arch/arm/mach-msm/board-8226.c
+++ b/arch/arm/mach-msm/board-8226.c
@@ -51,6 +51,7 @@
 #include "platsmp.h"
 #include "spm.h"
 #include "lpm_resources.h"
+#include "modem_notifier.h"
 
 static struct memtype_reserve msm8226_reserve_table[] __initdata = {
 	[MEMTYPE_SMI] = {
@@ -102,6 +103,8 @@
  */
 void __init msm8226_add_drivers(void)
 {
+	msm_init_modem_notifier_list();
+	msm_smd_init();
 	msm_rpm_driver_init();
 	msm_lpmrs_module_init();
 	msm_spm_device_init();
diff --git a/arch/arm/mach-msm/board-8610-gpiomux.c b/arch/arm/mach-msm/board-8610-gpiomux.c
index 5b3d30c..15d7679 100644
--- a/arch/arm/mach-msm/board-8610-gpiomux.c
+++ b/arch/arm/mach-msm/board-8610-gpiomux.c
@@ -23,19 +23,55 @@
 	.pull = GPIOMUX_PULL_NONE,
 };
 
+static struct gpiomux_setting gpio_spi_config = {
+	.func = GPIOMUX_FUNC_1,
+	.drv = GPIOMUX_DRV_6MA,
+	.pull = GPIOMUX_PULL_NONE,
+};
+
+static struct gpiomux_setting gpio_spi_cs_config = {
+	.func = GPIOMUX_FUNC_1,
+	.drv = GPIOMUX_DRV_6MA,
+	.pull = GPIOMUX_PULL_DOWN,
+};
+
 static struct msm_gpiomux_config msm_blsp_configs[] __initdata = {
 	{
-		.gpio      = 10,		/* BLSP-1 QUP-3 I2C_SDA */
+		.gpio      = 10,	/* BLSP1 QUP3 I2C_SDA */
 		.settings = {
 			[GPIOMUX_SUSPENDED] = &gpio_i2c_config,
 		},
 	},
 	{
-		.gpio      = 11,		/* BLSP-1 QUP-3 I2C_SCL */
+		.gpio      = 11,	/* BLSP1 QUP3 I2C_SCL */
 		.settings = {
 			[GPIOMUX_SUSPENDED] = &gpio_i2c_config,
 		},
 	},
+	{
+		.gpio      = 0,		/* BLSP1 QUP1 SPI_DATA_MOSI */
+		.settings = {
+			[GPIOMUX_SUSPENDED] = &gpio_spi_config,
+		},
+	},
+	{
+		.gpio      = 1,		/* BLSP1 QUP1 SPI_DATA_MISO */
+		.settings = {
+			[GPIOMUX_SUSPENDED] = &gpio_spi_config,
+		},
+	},
+	{
+		.gpio      = 3,		/* BLSP1 QUP1 SPI_CLK */
+		.settings = {
+			[GPIOMUX_SUSPENDED] = &gpio_spi_config,
+		},
+	},
+	{
+		.gpio      = 2,		/* BLSP1 QUP1 SPI_CS1 */
+		.settings = {
+			[GPIOMUX_SUSPENDED] = &gpio_spi_cs_config,
+		},
+	},
 };
 
 void __init msm8610_init_gpiomux(void)
diff --git a/arch/arm/mach-msm/board-8930-pmic.c b/arch/arm/mach-msm/board-8930-pmic.c
index cd292e0..4f398f4 100644
--- a/arch/arm/mach-msm/board-8930-pmic.c
+++ b/arch/arm/mach-msm/board-8930-pmic.c
@@ -374,7 +374,7 @@
 	.op_fdbck = true,
 	.ovp_val = WLED_OVP_32V,
 	.boost_curr_lim = WLED_CURR_LIMIT_525mA,
-	.num_strings = 1,
+	.strings = WLED_SECOND_STRING,
 };
 
 static int pm8038_led0_pwm_duty_pcts[56] = {
diff --git a/arch/arm/mach-msm/board-8930.c b/arch/arm/mach-msm/board-8930.c
index 25ba1aa..fbcc6f1 100644
--- a/arch/arm/mach-msm/board-8930.c
+++ b/arch/arm/mach-msm/board-8930.c
@@ -2473,6 +2473,7 @@
 	&msm8930_iommu_domain_device,
 	&msm_tsens_device,
 	&msm8930_cache_dump_device,
+	&msm8930_cpu_slp_status,
 };
 
 static struct platform_device *cdp_devices[] __initdata = {
diff --git a/arch/arm/mach-msm/board-8960.c b/arch/arm/mach-msm/board-8960.c
index 95f618a..3df0b38 100644
--- a/arch/arm/mach-msm/board-8960.c
+++ b/arch/arm/mach-msm/board-8960.c
@@ -2955,6 +2955,7 @@
 	&msm8960_cache_dump_device,
 	&msm8960_iommu_domain_device,
 	&msm_tsens_device,
+	&msm8960_cpu_slp_status,
 };
 
 static struct platform_device *cdp_devices[] __initdata = {
@@ -3036,7 +3037,10 @@
 
 	/* Fixup data that needs to change based on GPU ID */
 	if (cpu_is_msm8960ab()) {
-		kgsl_3d0_pdata->chipid = ADRENO_CHIPID(3, 2, 1, 0);
+		if (SOCINFO_VERSION_MINOR(soc_platform_version) == 0)
+			kgsl_3d0_pdata->chipid = ADRENO_CHIPID(3, 2, 1, 0);
+		else
+			kgsl_3d0_pdata->chipid = ADRENO_CHIPID(3, 2, 1, 1);
 		/* 8960PRO nominal clock rate is 320Mhz */
 		kgsl_3d0_pdata->pwrlevel[1].gpu_freq = 320000000;
 #ifdef CONFIG_MSM_BUS_SCALING
diff --git a/arch/arm/mach-msm/board-8974-gpiomux.c b/arch/arm/mach-msm/board-8974-gpiomux.c
index b3cc9b7..5240f38 100644
--- a/arch/arm/mach-msm/board-8974-gpiomux.c
+++ b/arch/arm/mach-msm/board-8974-gpiomux.c
@@ -115,6 +115,7 @@
 	.func = GPIOMUX_FUNC_GPIO,
 	.drv = GPIOMUX_DRV_8MA,
 	.pull = GPIOMUX_PULL_NONE,
+	.dir = GPIOMUX_OUT_HIGH,
 };
 
 static struct gpiomux_setting lcd_en_sus_cfg = {
@@ -385,6 +386,16 @@
 	},
 };
 
+static struct msm_gpiomux_config msm_lcd_configs[] __initdata = {
+	{
+		.gpio = 58,
+		.settings = {
+			[GPIOMUX_ACTIVE]    = &lcd_en_act_cfg,
+			[GPIOMUX_SUSPENDED] = &lcd_en_sus_cfg,
+		},
+	},
+};
+
 static struct msm_gpiomux_config msm_blsp_configs[] __initdata = {
 #if defined(CONFIG_KS8851) || defined(CONFIG_KS8851_MODULE)
 	{
@@ -419,13 +430,6 @@
 	},
 #endif
 	{
-		.gpio = 58,
-		.settings = {
-			[GPIOMUX_ACTIVE]    = &lcd_en_act_cfg,
-			[GPIOMUX_SUSPENDED] = &lcd_en_sus_cfg,
-		},
-	},
-	{
 		.gpio      = 6,		/* BLSP1 QUP2 I2C_DAT */
 		.settings = {
 			[GPIOMUX_SUSPENDED] = &gpio_i2c_config,
@@ -1008,6 +1012,9 @@
 	msm_gpiomux_install(msm8974_pri_auxpcm_configs,
 				 ARRAY_SIZE(msm8974_pri_auxpcm_configs));
 
+	msm_gpiomux_install_nowrite(msm_lcd_configs,
+			ARRAY_SIZE(msm_lcd_configs));
+
 	if (machine_is_msm8974_rumi())
 		msm_gpiomux_install(msm_rumi_blsp_configs,
 				    ARRAY_SIZE(msm_rumi_blsp_configs));
diff --git a/arch/arm/mach-msm/board-8974.c b/arch/arm/mach-msm/board-8974.c
index cc73330..f864583 100644
--- a/arch/arm/mach-msm/board-8974.c
+++ b/arch/arm/mach-msm/board-8974.c
@@ -85,21 +85,6 @@
 	of_scan_flat_dt(dt_scan_for_memory_hole, msm8974_reserve_table);
 }
 
-static struct platform_device msm_fm_platform_init = {
-	.name  = "iris_fm",
-	.id    = -1,
-};
-
-static struct platform_device *msm_bus_8974_devices[] = {
-	&msm_fm_platform_init,
-};
-
-static void __init msm8974_init_buses(void)
-{
-	platform_add_devices(msm_bus_8974_devices,
-				ARRAY_SIZE(msm_bus_8974_devices));
-};
-
 /*
  * Used to satisfy dependencies for devices that need to be
  * run early or in a particular order. Most likely your device doesn't fall
@@ -119,7 +104,6 @@
 		msm_clock_init(&msm8974_rumi_clock_init_data);
 	else
 		msm_clock_init(&msm8974_clock_init_data);
-	msm8974_init_buses();
 	msm_thermal_device_init();
 }
 
@@ -142,34 +126,6 @@
 			"msm_sdcc.3", NULL),
 	OF_DEV_AUXDATA("qcom,msm-sdcc", 0xF98E4000, \
 			"msm_sdcc.4", NULL),
-	OF_DEV_AUXDATA("arm,coresight-tmc", 0xFC322000, \
-			"coresight-tmc-etr", NULL),
-	OF_DEV_AUXDATA("arm,coresight-tpiu", 0xFC318000, \
-			"coresight-tpiu", NULL),
-	OF_DEV_AUXDATA("qcom,coresight-replicator", 0xFC31C000, \
-			"coresight-replicator", NULL),
-	OF_DEV_AUXDATA("arm,coresight-tmc", 0xFC307000, \
-			"coresight-tmc-etf", NULL),
-	OF_DEV_AUXDATA("arm,coresight-funnel", 0xFC31B000, \
-			"coresight-funnel-merg", NULL),
-	OF_DEV_AUXDATA("arm,coresight-funnel", 0xFC319000, \
-			"coresight-funnel-in0", NULL),
-	OF_DEV_AUXDATA("arm,coresight-funnel", 0xFC31A000, \
-			"coresight-funnel-in1", NULL),
-	OF_DEV_AUXDATA("arm,coresight-funnel", 0xFC345000, \
-			"coresight-funnel-kpss", NULL),
-	OF_DEV_AUXDATA("arm,coresight-funnel", 0xFC364000, \
-			"coresight-funnel-mmss", NULL),
-	OF_DEV_AUXDATA("arm,coresight-stm", 0xFC321000, \
-			"coresight-stm", NULL),
-	OF_DEV_AUXDATA("arm,coresight-etm", 0xFC33C000, \
-			"coresight-etm0", NULL),
-	OF_DEV_AUXDATA("arm,coresight-etm", 0xFC33D000, \
-			"coresight-etm1", NULL),
-	OF_DEV_AUXDATA("arm,coresight-etm", 0xFC33E000, \
-			"coresight-etm2", NULL),
-	OF_DEV_AUXDATA("arm,coresight-etm", 0xFC33F000, \
-			"coresight-etm3", NULL),
 	OF_DEV_AUXDATA("qcom,msm-rng", 0xF9BFF000, \
 			"msm_rng", NULL),
 	OF_DEV_AUXDATA("qcom,qseecom", 0xFE806000, \
diff --git a/arch/arm/mach-msm/board-qrd7627a.c b/arch/arm/mach-msm/board-qrd7627a.c
index 7038ab9..9c9ccaa 100644
--- a/arch/arm/mach-msm/board-qrd7627a.c
+++ b/arch/arm/mach-msm/board-qrd7627a.c
@@ -93,9 +93,9 @@
 		"qup_scl" },
 	{ GPIO_CFG(61, 0, GPIO_CFG_OUTPUT, GPIO_CFG_NO_PULL, GPIO_CFG_8MA),
 		"qup_sda" },
-	{ GPIO_CFG(131, 0, GPIO_CFG_OUTPUT, GPIO_CFG_NO_PULL, GPIO_CFG_8MA),
+	{ GPIO_CFG(131, 0, GPIO_CFG_OUTPUT, GPIO_CFG_NO_PULL, GPIO_CFG_2MA),
 		"qup_scl" },
-	{ GPIO_CFG(132, 0, GPIO_CFG_OUTPUT, GPIO_CFG_NO_PULL, GPIO_CFG_8MA),
+	{ GPIO_CFG(132, 0, GPIO_CFG_OUTPUT, GPIO_CFG_NO_PULL, GPIO_CFG_2MA),
 		"qup_sda" },
 };
 
@@ -104,9 +104,9 @@
 		"qup_scl" },
 	{ GPIO_CFG(61, 1, GPIO_CFG_INPUT, GPIO_CFG_NO_PULL, GPIO_CFG_8MA),
 		"qup_sda" },
-	{ GPIO_CFG(131, 2, GPIO_CFG_INPUT, GPIO_CFG_NO_PULL, GPIO_CFG_8MA),
+	{ GPIO_CFG(131, 2, GPIO_CFG_INPUT, GPIO_CFG_NO_PULL, GPIO_CFG_2MA),
 		"qup_scl" },
-	{ GPIO_CFG(132, 2, GPIO_CFG_INPUT, GPIO_CFG_NO_PULL, GPIO_CFG_8MA),
+	{ GPIO_CFG(132, 2, GPIO_CFG_INPUT, GPIO_CFG_NO_PULL, GPIO_CFG_2MA),
 		"qup_sda" },
 };
 
diff --git a/arch/arm/mach-msm/clock-8226.c b/arch/arm/mach-msm/clock-8226.c
index 6d818d8..9198976 100644
--- a/arch/arm/mach-msm/clock-8226.c
+++ b/arch/arm/mach-msm/clock-8226.c
@@ -330,7 +330,6 @@
 #define OXILI_GFX3D_CBCR                                   (0x4028)
 #define OXILICX_AXI_CBCR                                   (0x4038)
 #define OXILICX_AHB_CBCR                                   (0x403C)
-#define OCMEMCX_AHB_CBCR                                   (0x405C)
 #define MMPLL2_PLL_MODE                                    (0x4100)
 #define MMPLL2_PLL_STATUS                                  (0x411C)
 #define MMSS_MMSSNOC_AHB_CBCR                              (0x5024)
@@ -442,50 +441,9 @@
 #define GP2_CMD_RCGR                                       (0x1944)
 #define GP3_CBCR                                           (0x1980)
 #define GP3_CMD_RCGR                                       (0x1984)
-#define LPAAUDIO_PLL_MODE                                  (0x0000)
-#define LPAAUDIO_PLL_L                                     (0x0004)
-#define LPAAUDIO_PLL_M                                     (0x0008)
-#define LPAAUDIO_PLL_N                                     (0x000C)
-#define LPAAUDIO_PLL_USER_CTL                              (0x0010)
-#define LPAAUDIO_PLL_STATUS                                (0x001C)
-#define LPA_PLL_VOTE_APPS                                  (0x2000)
 #define Q6SS_BCR                                           (0x6000)
-#define AUDIO_CORE_GDSCR                                   (0x7000)
-#define LPAIF_SPKR_CMD_RCGR                                (0xA000)
-#define AUDIO_CORE_LPAIF_CODEC_SPKR_OSR_CBCR               (0xA014)
-#define AUDIO_CORE_LPAIF_CODEC_SPKR_IBIT_CBCR              (0xA018)
-#define AUDIO_CORE_LPAIF_CODEC_SPKR_EBIT_CBCR              (0xA01C)
-#define LPAIF_PRI_CMD_RCGR                                 (0xB000)
-#define AUDIO_CORE_LPAIF_PRI_OSR_CBCR                      (0xB014)
-#define AUDIO_CORE_LPAIF_PRI_IBIT_CBCR                     (0xB018)
-#define AUDIO_CORE_LPAIF_PRI_EBIT_CBCR                     (0xB01C)
-#define LPAIF_SEC_CMD_RCGR                                 (0xC000)
-#define AUDIO_CORE_LPAIF_SEC_OSR_CBCR                      (0xC014)
-#define AUDIO_CORE_LPAIF_SEC_IBIT_CBCR                     (0xC018)
-#define AUDIO_CORE_LPAIF_SEC_EBIT_CBCR                     (0xC01C)
-#define LPAIF_TER_CMD_RCGR                                 (0xD000)
-#define AUDIO_CORE_LPAIF_TER_OSR_CBCR                      (0xD014)
-#define AUDIO_CORE_LPAIF_TER_IBIT_CBCR                     (0xD018)
-#define AUDIO_CORE_LPAIF_TER_EBIT_CBCR                     (0xD01C)
-#define LPAIF_QUAD_CMD_RCGR                                (0xE000)
-#define AUDIO_CORE_LPAIF_QUAD_OSR_CBCR                     (0xE014)
-#define AUDIO_CORE_LPAIF_QUAD_IBIT_CBCR                    (0xE018)
-#define AUDIO_CORE_LPAIF_QUAD_EBIT_CBCR                    (0xE01C)
-#define LPAIF_PCM0_CMD_RCGR                                (0xF000)
-#define AUDIO_CORE_LPAIF_PCM0_IBIT_CBCR                    (0xF014)
-#define AUDIO_CORE_LPAIF_PCM0_EBIT_CBCR                    (0xF018)
-#define LPAIF_PCM1_CMD_RCGR                                (0x10000)
-#define AUDIO_CORE_LPAIF_PCM1_IBIT_CBCR                    (0x10014)
-#define AUDIO_CORE_LPAIF_PCM1_EBIT_CBCR                    (0x10018)
-#define SLIMBUS_CMD_RCGR                                   (0x12000)
-#define AUDIO_CORE_SLIMBUS_CORE_CBCR                       (0x12014)
-#define AUDIO_CORE_SLIMBUS_LFABIF_CBCR                     (0x12018)
-#define LPAIF_PCMOE_CMD_RCGR                               (0x13000)
-#define AUDIO_CORE_LPAIF_PCM_DATA_OE_CBCR                  (0x13014)
-#define AUDIO_CORE_IXFABRIC_CBCR                           (0x1B000)
 #define Q6SS_AHB_LFABIF_CBCR                               (0x22000)
 #define Q6SS_AHBM_CBCR                                     (0x22004)
-#define AUDIO_WRAPPER_BR_CBCR                              (0x24000)
 #define Q6SS_XO_CBCR                                       (0x26000)
 
 static unsigned int soft_vote_gpll0;
@@ -540,7 +498,8 @@
 };
 
 static struct clk_freq_tbl ftbl_gcc_blsp1_qup1_6_i2c_apps_clk[] = {
-	F_GCC(  19200000,         xo,   0,    0,    0),
+	F_GCC(  19200000,         xo,   1,    0,    0),
+	F_GCC(  50000000,      gpll0,  12,    0,    0),
 	F_END
 };
 
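For the two I2C rates added above: assuming gpll0 runs at 600 MHz on this target and xo is the 19.2 MHz crystal, the divider column works out as below; the helper only mirrors the arithmetic, not whatever the real F_GCC macro does with these values.

#include <stdio.h>

/* Illustrative check of the new i2c_apps_clk rows; not driver code. */
static unsigned long rate_hz(unsigned long src_hz, unsigned int div)
{
	return src_hz / div;	/* treat the table's div column as a plain divisor */
}

int main(void)
{
	const unsigned long xo_hz = 19200000UL;		/* 19.2 MHz crystal */
	const unsigned long gpll0_hz = 600000000UL;	/* assumed 600 MHz */

	printf("xo/1     = %lu Hz\n", rate_hz(xo_hz, 1));	/* 19200000 */
	printf("gpll0/12 = %lu Hz\n", rate_hz(gpll0_hz, 12));	/* 50000000 */
	return 0;
}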
@@ -1375,17 +1334,6 @@
 	},
 };
 
-static struct branch_clk gcc_mmss_noc_cfg_ahb_clk = {
-	.cbcr_reg = MMSS_NOC_CFG_AHB_CBCR,
-	.has_sibling = 1,
-	.base = &virt_bases[GCC_BASE],
-	.c = {
-		.dbg_name = "gcc_mmss_noc_cfg_ahb_clk",
-		.ops = &clk_ops_branch,
-		CLK_INIT(gcc_mmss_noc_cfg_ahb_clk.c),
-	},
-};
-
 static struct branch_clk gcc_mss_cfg_ahb_clk = {
 	.cbcr_reg = MSS_CFG_AHB_CBCR,
 	.has_sibling = 1,
@@ -1633,7 +1581,6 @@
 static struct measure_mux_entry measure_mux_GCC[] = {
 	{ &gcc_periph_noc_ahb_clk.c,  GCC_BASE, 0x0010 },
 	{ &gcc_noc_conf_xpu_ahb_clk.c,  GCC_BASE, 0x0018 },
-	{ &gcc_mmss_noc_cfg_ahb_clk.c,  GCC_BASE, 0x002a },
 	{ &gcc_mss_cfg_ahb_clk.c,  GCC_BASE, 0x0030 },
 	{ &gcc_mss_q6_bimc_axi_clk.c,  GCC_BASE, 0x0031 },
 	{ &gcc_usb_hsic_ahb_clk.c,  GCC_BASE, 0x0058 },
@@ -1733,24 +1680,11 @@
 		.dbg_name = "axi_clk_src",
 		.ops = &clk_ops_rcg,
 		VDD_DIG_FMAX_MAP3(LOW, 100000000, NOMINAL, 200000000, HIGH,
-		266670000),
+			266670000),
 		CLK_INIT(axi_clk_src.c),
 	},
 };
 
-static struct pll_clk mmpll2_pll = {
-	.mode_reg = (void __iomem *)MMPLL2_PLL_MODE,
-	.status_reg = (void __iomem *)MMPLL2_PLL_STATUS,
-	.base = &virt_bases[MMSS_BASE],
-	.c = {
-		.dbg_name = "mmpll2_pll",
-		.parent = &xo.c,
-		.rate = 900000000,
-		.ops = &clk_ops_local_pll,
-		CLK_INIT(mmpll2_pll.c),
-	},
-};
-
 static struct clk_freq_tbl ftbl_camss_csi0_1_clk[] = {
 	F_MMSS( 100000000,      gpll0,   6,    0,    0),
 	F_MMSS( 200000000, mmpll0_pll,   4,    0,    0),
@@ -1810,7 +1744,7 @@
 		.dbg_name = "vfe0_clk_src",
 		.ops = &clk_ops_rcg,
 		VDD_DIG_FMAX_MAP3(LOW, 133330000, NOMINAL, 266670000, HIGH,
-		320000000),
+			320000000),
 		CLK_INIT(vfe0_clk_src.c),
 	},
 };
@@ -1837,7 +1771,7 @@
 		.dbg_name = "mdp_clk_src",
 		.ops = &clk_ops_rcg,
 		VDD_DIG_FMAX_MAP3(LOW, 92310000, NOMINAL, 177780000, HIGH,
-		200000000),
+			200000000),
 		CLK_INIT(mdp_clk_src.c),
 	},
 };
@@ -1862,7 +1796,7 @@
 		.dbg_name = "jpeg0_clk_src",
 		.ops = &clk_ops_rcg,
 		VDD_DIG_FMAX_MAP3(LOW, 133330000, NOMINAL, 266670000, HIGH,
-		320000000),
+			320000000),
 		CLK_INIT(jpeg0_clk_src.c),
 	},
 };
@@ -1891,6 +1825,7 @@
 	F_MMSS(  66700000,      gpll0,   9,    0,    0),
 	F_MMSS( 100000000,      gpll0,   6,    0,    0),
 	F_MMSS( 133330000, mmpll0_pll,   6,    0,    0),
+	F_MMSS( 160000000, mmpll0_pll,   5,    0,    0),
 	F_END
 };
 
@@ -1904,7 +1839,7 @@
 		.dbg_name = "vcodec0_clk_src",
 		.ops = &clk_ops_rcg_mnd,
 		VDD_DIG_FMAX_MAP3(LOW, 66670000, NOMINAL, 133330000, HIGH,
-		160000000),
+			160000000),
 		CLK_INIT(vcodec0_clk_src.c),
 	},
 };
@@ -2050,7 +1985,7 @@
 		.dbg_name = "cpp_clk_src",
 		.ops = &clk_ops_rcg,
 		VDD_DIG_FMAX_MAP3(LOW, 133330000, NOMINAL, 266670000, HIGH,
-		320000000),
+			320000000),
 		CLK_INIT(cpp_clk_src.c),
 	},
 };
@@ -2570,17 +2505,6 @@
 	},
 };
 
-static struct branch_clk mmss_mmssnoc_ahb_clk = {
-	.cbcr_reg = MMSS_MMSSNOC_AHB_CBCR,
-	.has_sibling = 1,
-	.base = &virt_bases[MMSS_BASE],
-	.c = {
-		.dbg_name = "mmss_mmssnoc_ahb_clk",
-		.ops = &clk_ops_branch,
-		CLK_INIT(mmss_mmssnoc_ahb_clk.c),
-	},
-};
-
 static struct branch_clk mmss_mmssnoc_bto_ahb_clk = {
 	.cbcr_reg = MMSS_MMSSNOC_BTO_AHB_CBCR,
 	.has_sibling = 1,
@@ -2618,17 +2542,6 @@
 	},
 };
 
-static struct branch_clk ocmemcx_ahb_clk = {
-	.cbcr_reg = OCMEMCX_AHB_CBCR,
-	.has_sibling = 1,
-	.base = &virt_bases[MMSS_BASE],
-	.c = {
-		.dbg_name = "ocmemcx_ahb_clk",
-		.ops = &clk_ops_branch,
-		CLK_INIT(ocmemcx_ahb_clk.c),
-	},
-};
-
 static struct branch_clk oxili_gfx3d_clk = {
 	.cbcr_reg = OXILI_GFX3D_CBCR,
 	.has_sibling = 1,
@@ -2639,7 +2552,6 @@
 		.parent = &gfx3d_clk_src.c,
 		.ops = &clk_ops_branch,
 		CLK_INIT(oxili_gfx3d_clk.c),
-		.depends = &oxilicx_axi_clk.c,
 	},
 };
 
@@ -2702,12 +2614,10 @@
 };
 
 static struct measure_mux_entry measure_mux_MMSS[] = {
-	{ &mmss_mmssnoc_ahb_clk.c,  MMSS_BASE, 0x0001 },
 	{ &mmss_mmssnoc_bto_ahb_clk.c,  MMSS_BASE, 0x0002 },
 	{ &mmss_misc_ahb_clk.c,  MMSS_BASE, 0x0003 },
 	{ &mmss_mmssnoc_axi_clk.c,  MMSS_BASE, 0x0004 },
 	{ &mmss_s0_axi_clk.c,  MMSS_BASE, 0x0005 },
-	{ &ocmemcx_ahb_clk.c,  MMSS_BASE, 0x000a },
 	{ &oxilicx_axi_clk.c,  MMSS_BASE, 0x000b },
 	{ &oxilicx_ahb_clk.c,  MMSS_BASE, 0x000c },
 	{ &oxili_gfx3d_clk.c,  MMSS_BASE, 0x000d },
@@ -2755,226 +2665,6 @@
 	{&dummy_clk, N_BASES, 0x0000},
 };
 
-static struct pll_vote_clk lpaaudio_pll = {
-	.en_reg = (void __iomem *)LPA_PLL_VOTE_APPS,
-	.en_mask = BIT(0),
-	.status_reg = (void __iomem *)LPAAUDIO_PLL_STATUS,
-	.status_mask = BIT(17),
-	.base = &virt_bases[LPASS_BASE],
-	.c = {
-		.rate = 491520000,
-		.parent = &xo.c,
-		.dbg_name = "lpaaudio_pll",
-		.ops = &clk_ops_pll_vote,
-		CLK_INIT(lpaaudio_pll.c),
-	},
-};
-
-static struct clk_freq_tbl ftbl_audio_core_lpaif__osr_clk[] = {
-	F_LPASS(    512000, lpaaudio_pll,  16,    1,   60),
-	F_LPASS(    768000, lpaaudio_pll,  16,    1,   40),
-	F_LPASS(   1024000, lpaaudio_pll,  16,    1,   30),
-	F_LPASS(   1536000, lpaaudio_pll,  16,    1,   20),
-	F_LPASS(   2048000, lpaaudio_pll,  16,    1,   15),
-	F_LPASS(   3072000, lpaaudio_pll,  16,    1,   10),
-	F_LPASS(   4096000, lpaaudio_pll,  15,    1,    8),
-	F_LPASS(   6144000, lpaaudio_pll,  10,    1,    8),
-	F_LPASS(   8192000, lpaaudio_pll,  15,    1,    4),
-	F_LPASS(  12288000, lpaaudio_pll,  10,    1,    4),
-	F_END
-};
-
-static struct rcg_clk lpaif_pri_clk_src = {
-	.cmd_rcgr_reg = LPAIF_PRI_CMD_RCGR,
-	.set_rate = set_rate_mnd,
-	.freq_tbl = ftbl_audio_core_lpaif__osr_clk,
-	.current_freq = &rcg_dummy_freq,
-	.base = &virt_bases[LPASS_BASE],
-	.c = {
-		.dbg_name = "lpaif_pri_clk_src",
-		.ops = &clk_ops_rcg_mnd,
-		VDD_DIG_FMAX_MAP2(LOW, 12290000, NOMINAL, 24580000),
-		CLK_INIT(lpaif_pri_clk_src.c),
-	},
-};
-
-static struct rcg_clk lpaif_quad_clk_src = {
-	.cmd_rcgr_reg = LPAIF_QUAD_CMD_RCGR,
-	.set_rate = set_rate_mnd,
-	.freq_tbl = ftbl_audio_core_lpaif__osr_clk,
-	.current_freq = &rcg_dummy_freq,
-	.base = &virt_bases[LPASS_BASE],
-	.c = {
-		.dbg_name = "lpaif_quad_clk_src",
-		.ops = &clk_ops_rcg_mnd,
-		VDD_DIG_FMAX_MAP2(LOW, 12290000, NOMINAL, 24580000),
-		CLK_INIT(lpaif_quad_clk_src.c),
-	},
-};
-
-static struct rcg_clk lpaif_sec_clk_src = {
-	.cmd_rcgr_reg = LPAIF_SEC_CMD_RCGR,
-	.set_rate = set_rate_mnd,
-	.freq_tbl = ftbl_audio_core_lpaif__osr_clk,
-	.current_freq = &rcg_dummy_freq,
-	.base = &virt_bases[LPASS_BASE],
-	.c = {
-		.dbg_name = "lpaif_sec_clk_src",
-		.ops = &clk_ops_rcg_mnd,
-		VDD_DIG_FMAX_MAP2(LOW, 12290000, NOMINAL, 24580000),
-		CLK_INIT(lpaif_sec_clk_src.c),
-	},
-};
-
-static struct rcg_clk lpaif_spkr_clk_src = {
-	.cmd_rcgr_reg = LPAIF_SPKR_CMD_RCGR,
-	.set_rate = set_rate_mnd,
-	.freq_tbl = ftbl_audio_core_lpaif__osr_clk,
-	.current_freq = &rcg_dummy_freq,
-	.base = &virt_bases[LPASS_BASE],
-	.c = {
-		.dbg_name = "lpaif_spkr_clk_src",
-		.ops = &clk_ops_rcg_mnd,
-		VDD_DIG_FMAX_MAP2(LOW, 12290000, NOMINAL, 24580000),
-		CLK_INIT(lpaif_spkr_clk_src.c),
-	},
-};
-
-static struct rcg_clk lpaif_ter_clk_src = {
-	.cmd_rcgr_reg = LPAIF_TER_CMD_RCGR,
-	.set_rate = set_rate_mnd,
-	.freq_tbl = ftbl_audio_core_lpaif__osr_clk,
-	.current_freq = &rcg_dummy_freq,
-	.base = &virt_bases[LPASS_BASE],
-	.c = {
-		.dbg_name = "lpaif_ter_clk_src",
-		.ops = &clk_ops_rcg_mnd,
-		VDD_DIG_FMAX_MAP2(LOW, 12290000, NOMINAL, 24580000),
-		CLK_INIT(lpaif_ter_clk_src.c),
-	},
-};
-
-static struct clk_freq_tbl ftbl_audio_core_lpaif_pcmoe_clk[] = {
-	F_LPASS(    512000, lpaaudio_pll,  16,    1,   60),
-	F_LPASS(    768000, lpaaudio_pll,  16,    1,   40),
-	F_LPASS(   1024000, lpaaudio_pll,  16,    1,   30),
-	F_LPASS(   1536000, lpaaudio_pll,  16,    1,   20),
-	F_LPASS(   2048000, lpaaudio_pll,  16,    1,   15),
-	F_LPASS(   3072000, lpaaudio_pll,  16,    1,   10),
-	F_LPASS(   4096000, lpaaudio_pll,  15,    1,    8),
-	F_LPASS(   6144000, lpaaudio_pll,  10,    1,    8),
-	F_LPASS(   8192000, lpaaudio_pll,  15,    1,    4),
-	F_LPASS(  12288000, lpaaudio_pll,  10,    1,    4),
-	F_END
-};
-
-static struct rcg_clk lpaif_pcmoe_clk_src = {
-	.cmd_rcgr_reg = LPAIF_PCMOE_CMD_RCGR,
-	.set_rate = set_rate_mnd,
-	.freq_tbl = ftbl_audio_core_lpaif_pcmoe_clk,
-	.current_freq = &rcg_dummy_freq,
-	.base = &virt_bases[LPASS_BASE],
-	.c = {
-		.dbg_name = "lpaif_pcmoe_clk_src",
-		.ops = &clk_ops_rcg_mnd,
-		VDD_DIG_FMAX_MAP2(LOW, 12290000, NOMINAL, 24580000),
-		CLK_INIT(lpaif_pcmoe_clk_src.c),
-	},
-};
-
-static struct clk_freq_tbl ftbl_audio_core_lpaif_pcm0_1_ibit_clk[] = {
-	F_LPASS(    512000, lpaaudio_pll,  16,    1,   60),
-	F_LPASS(    768000, lpaaudio_pll,  16,    1,   40),
-	F_LPASS(   1024000, lpaaudio_pll,  16,    1,   30),
-	F_LPASS(   1536000, lpaaudio_pll,  16,    1,   20),
-	F_LPASS(   2048000, lpaaudio_pll,  16,    1,   15),
-	F_LPASS(   3072000, lpaaudio_pll,  16,    1,   10),
-	F_LPASS(   4096000, lpaaudio_pll,  15,    1,    8),
-	F_LPASS(   6144000, lpaaudio_pll,  10,    1,    8),
-	F_LPASS(   8192000, lpaaudio_pll,  15,    1,    4),
-	F_END
-};
-
-static struct rcg_clk lpaif_pcm0_clk_src = {
-	.cmd_rcgr_reg = LPAIF_PCM0_CMD_RCGR,
-	.set_rate = set_rate_mnd,
-	.freq_tbl = ftbl_audio_core_lpaif_pcm0_1_ibit_clk,
-	.current_freq = &rcg_dummy_freq,
-	.base = &virt_bases[LPASS_BASE],
-	.c = {
-		.dbg_name = "lpaif_pcm0_clk_src",
-		.ops = &clk_ops_rcg_mnd,
-		VDD_DIG_FMAX_MAP2(LOW, 4100000, NOMINAL, 8190000),
-		CLK_INIT(lpaif_pcm0_clk_src.c),
-	},
-};
-
-static struct rcg_clk lpaif_pcm1_clk_src = {
-	.cmd_rcgr_reg = LPAIF_PCM1_CMD_RCGR,
-	.set_rate = set_rate_mnd,
-	.freq_tbl = ftbl_audio_core_lpaif_pcm0_1_ibit_clk,
-	.current_freq = &rcg_dummy_freq,
-	.base = &virt_bases[LPASS_BASE],
-	.c = {
-		.dbg_name = "lpaif_pcm1_clk_src",
-		.ops = &clk_ops_rcg_mnd,
-		VDD_DIG_FMAX_MAP2(LOW, 4100000, NOMINAL, 8190000),
-		CLK_INIT(lpaif_pcm1_clk_src.c),
-	},
-};
-
-static struct clk_freq_tbl ftbl_audio_core_slimbus_core_clk[] = {
-	F_LPASS(  24576000, lpaaudio_pll,  10,    1,    2),
-	F_END
-};
-
-static struct rcg_clk slimbus_clk_src = {
-	.cmd_rcgr_reg = SLIMBUS_CMD_RCGR,
-	.set_rate = set_rate_mnd,
-	.freq_tbl = ftbl_audio_core_slimbus_core_clk,
-	.current_freq = &rcg_dummy_freq,
-	.base = &virt_bases[LPASS_BASE],
-	.c = {
-		.dbg_name = "slimbus_clk_src",
-		.ops = &clk_ops_rcg_mnd,
-		VDD_DIG_FMAX_MAP2(LOW, 13000000, NOMINAL, 26010000),
-		CLK_INIT(slimbus_clk_src.c),
-	},
-};
-
-static struct branch_clk audio_core_ixfabric_clk = {
-	.cbcr_reg = AUDIO_CORE_IXFABRIC_CBCR,
-	.has_sibling = 1,
-	.base = &virt_bases[LPASS_BASE],
-	.c = {
-		.dbg_name = "audio_core_ixfabric_clk",
-		.ops = &clk_ops_branch,
-		CLK_INIT(audio_core_ixfabric_clk.c),
-	},
-};
-
-static struct branch_clk audio_core_slimbus_lfabif_clk = {
-	.cbcr_reg = AUDIO_CORE_SLIMBUS_LFABIF_CBCR,
-	.has_sibling = 1,
-	.base = &virt_bases[LPASS_BASE],
-	.c = {
-		.dbg_name = "audio_core_slimbus_lfabif_clk",
-		.ops = &clk_ops_branch,
-		CLK_INIT(audio_core_slimbus_lfabif_clk.c),
-	},
-};
-
-static struct branch_clk audio_wrapper_br_clk = {
-	.cbcr_reg = AUDIO_WRAPPER_BR_CBCR,
-	.has_sibling = 1,
-	.base = &virt_bases[LPASS_BASE],
-	.c = {
-		.dbg_name = "audio_wrapper_br_clk",
-		.ops = &clk_ops_branch,
-		CLK_INIT(audio_wrapper_br_clk.c),
-	},
-};
-
 static struct branch_clk q6ss_ahb_lfabif_clk = {
 	.cbcr_reg = Q6SS_AHB_LFABIF_CBCR,
 	.has_sibling = 1,
@@ -2997,77 +2687,6 @@
 	},
 };
 
-static struct branch_clk audio_core_lpaif_pcmoe_clk = {
-	.cbcr_reg = AUDIO_CORE_LPAIF_PCM_DATA_OE_CBCR,
-	.has_sibling = 0,
-	.base = &virt_bases[LPASS_BASE],
-	.c = {
-		.dbg_name = "audio_core_lpaif_pcmoe_clk",
-		.parent = &lpaif_pcmoe_clk_src.c,
-		.ops = &clk_ops_branch,
-		CLK_INIT(audio_core_lpaif_pcmoe_clk.c),
-	},
-};
-
-static struct branch_clk audio_core_lpaif_pri_ebit_clk = {
-	.cbcr_reg = AUDIO_CORE_LPAIF_PRI_EBIT_CBCR,
-	.has_sibling = 0,
-	.base = &virt_bases[LPASS_BASE],
-	.c = {
-		.dbg_name = "audio_core_lpaif_pri_ebit_clk",
-		.ops = &clk_ops_branch,
-		CLK_INIT(audio_core_lpaif_pri_ebit_clk.c),
-	},
-};
-
-static struct branch_clk audio_core_lpaif_pri_ibit_clk = {
-	.cbcr_reg = AUDIO_CORE_LPAIF_PRI_IBIT_CBCR,
-	.has_sibling = 1,
-	.max_div = 15,
-	.base = &virt_bases[LPASS_BASE],
-	.c = {
-		.dbg_name = "audio_core_lpaif_pri_ibit_clk",
-		.parent = &lpaif_pri_clk_src.c,
-		.ops = &clk_ops_branch,
-		CLK_INIT(audio_core_lpaif_pri_ibit_clk.c),
-	},
-};
-
-static struct branch_clk audio_core_lpaif_pri_osr_clk = {
-	.cbcr_reg = AUDIO_CORE_LPAIF_PRI_OSR_CBCR,
-	.has_sibling = 1,
-	.base = &virt_bases[LPASS_BASE],
-	.c = {
-		.dbg_name = "audio_core_lpaif_pri_osr_clk",
-		.parent = &lpaif_pri_clk_src.c,
-		.ops = &clk_ops_branch,
-		CLK_INIT(audio_core_lpaif_pri_osr_clk.c),
-	},
-};
-
-static struct branch_clk audio_core_lpaif_pcm0_ebit_clk = {
-	.cbcr_reg = AUDIO_CORE_LPAIF_PCM0_EBIT_CBCR,
-	.has_sibling = 0,
-	.base = &virt_bases[LPASS_BASE],
-	.c = {
-		.dbg_name = "audio_core_lpaif_pcm0_ebit_clk",
-		.ops = &clk_ops_branch,
-		CLK_INIT(audio_core_lpaif_pcm0_ebit_clk.c),
-	},
-};
-
-static struct branch_clk audio_core_lpaif_pcm0_ibit_clk = {
-	.cbcr_reg = AUDIO_CORE_LPAIF_PCM0_IBIT_CBCR,
-	.has_sibling = 0,
-	.base = &virt_bases[LPASS_BASE],
-	.c = {
-		.dbg_name = "audio_core_lpaif_pcm0_ibit_clk",
-		.parent = &lpaif_pcm0_clk_src.c,
-		.ops = &clk_ops_branch,
-		CLK_INIT(audio_core_lpaif_pcm0_ibit_clk.c),
-	},
-};
-
 static struct branch_clk q6ss_xo_clk = {
 	.cbcr_reg = Q6SS_XO_CBCR,
 	.has_sibling = 1,
@@ -3081,203 +2700,10 @@
 	},
 };
 
-static struct branch_clk audio_core_lpaif_quad_ebit_clk = {
-	.cbcr_reg = AUDIO_CORE_LPAIF_QUAD_EBIT_CBCR,
-	.has_sibling = 0,
-	.base = &virt_bases[LPASS_BASE],
-	.c = {
-		.dbg_name = "audio_core_lpaif_quad_ebit_clk",
-		.ops = &clk_ops_branch,
-		CLK_INIT(audio_core_lpaif_quad_ebit_clk.c),
-	},
-};
-
-static struct branch_clk audio_core_lpaif_quad_ibit_clk = {
-	.cbcr_reg = AUDIO_CORE_LPAIF_QUAD_IBIT_CBCR,
-	.has_sibling = 1,
-	.max_div = 15,
-	.base = &virt_bases[LPASS_BASE],
-	.c = {
-		.dbg_name = "audio_core_lpaif_quad_ibit_clk",
-		.parent = &lpaif_quad_clk_src.c,
-		.ops = &clk_ops_branch,
-		CLK_INIT(audio_core_lpaif_quad_ibit_clk.c),
-	},
-};
-
-static struct branch_clk audio_core_lpaif_quad_osr_clk = {
-	.cbcr_reg = AUDIO_CORE_LPAIF_QUAD_OSR_CBCR,
-	.has_sibling = 1,
-	.base = &virt_bases[LPASS_BASE],
-	.c = {
-		.dbg_name = "audio_core_lpaif_quad_osr_clk",
-		.parent = &lpaif_quad_clk_src.c,
-		.ops = &clk_ops_branch,
-		CLK_INIT(audio_core_lpaif_quad_osr_clk.c),
-	},
-};
-
-static struct branch_clk audio_core_lpaif_sec_ebit_clk = {
-	.cbcr_reg = AUDIO_CORE_LPAIF_SEC_EBIT_CBCR,
-	.has_sibling = 0,
-	.base = &virt_bases[LPASS_BASE],
-	.c = {
-		.dbg_name = "audio_core_lpaif_sec_ebit_clk",
-		.ops = &clk_ops_branch,
-		CLK_INIT(audio_core_lpaif_sec_ebit_clk.c),
-	},
-};
-
-static struct branch_clk audio_core_lpaif_sec_ibit_clk = {
-	.cbcr_reg = AUDIO_CORE_LPAIF_SEC_IBIT_CBCR,
-	.has_sibling = 1,
-	.max_div = 15,
-	.base = &virt_bases[LPASS_BASE],
-	.c = {
-		.dbg_name = "audio_core_lpaif_sec_ibit_clk",
-		.parent = &lpaif_sec_clk_src.c,
-		.ops = &clk_ops_branch,
-		CLK_INIT(audio_core_lpaif_sec_ibit_clk.c),
-	},
-};
-
-static struct branch_clk audio_core_lpaif_sec_osr_clk = {
-	.cbcr_reg = AUDIO_CORE_LPAIF_SEC_OSR_CBCR,
-	.has_sibling = 1,
-	.base = &virt_bases[LPASS_BASE],
-	.c = {
-		.dbg_name = "audio_core_lpaif_sec_osr_clk",
-		.parent = &lpaif_sec_clk_src.c,
-		.ops = &clk_ops_branch,
-		CLK_INIT(audio_core_lpaif_sec_osr_clk.c),
-	},
-};
-
-static struct branch_clk audio_core_lpaif_pcm1_ebit_clk = {
-	.cbcr_reg = AUDIO_CORE_LPAIF_PCM1_EBIT_CBCR,
-	.has_sibling = 0,
-	.base = &virt_bases[LPASS_BASE],
-	.c = {
-		.dbg_name = "audio_core_lpaif_pcm1_ebit_clk",
-		.ops = &clk_ops_branch,
-		CLK_INIT(audio_core_lpaif_pcm1_ebit_clk.c),
-	},
-};
-
-static struct branch_clk audio_core_lpaif_pcm1_ibit_clk = {
-	.cbcr_reg = AUDIO_CORE_LPAIF_PCM1_IBIT_CBCR,
-	.has_sibling = 0,
-	.base = &virt_bases[LPASS_BASE],
-	.c = {
-		.dbg_name = "audio_core_lpaif_pcm1_ibit_clk",
-		.parent = &lpaif_pcm1_clk_src.c,
-		.ops = &clk_ops_branch,
-		CLK_INIT(audio_core_lpaif_pcm1_ibit_clk.c),
-	},
-};
-
-static struct branch_clk audio_core_slimbus_core_clk = {
-	.cbcr_reg = AUDIO_CORE_SLIMBUS_CORE_CBCR,
-	.has_sibling = 0,
-	.base = &virt_bases[LPASS_BASE],
-	.c = {
-		.dbg_name = "audio_core_slimbus_core_clk",
-		.parent = &slimbus_clk_src.c,
-		.ops = &clk_ops_branch,
-		CLK_INIT(audio_core_slimbus_core_clk.c),
-	},
-};
-
-static struct branch_clk audio_core_lpaif_codec_spkr_ebit_clk = {
-	.cbcr_reg = AUDIO_CORE_LPAIF_CODEC_SPKR_EBIT_CBCR,
-	.has_sibling = 0,
-	.base = &virt_bases[LPASS_BASE],
-	.c = {
-		.dbg_name = "audio_core_lpaif_codec_spkr_ebit_clk",
-		.ops = &clk_ops_branch,
-		CLK_INIT(audio_core_lpaif_codec_spkr_ebit_clk.c),
-	},
-};
-
-static struct branch_clk audio_core_lpaif_codec_spkr_ibit_clk = {
-	.cbcr_reg = AUDIO_CORE_LPAIF_CODEC_SPKR_IBIT_CBCR,
-	.has_sibling = 1,
-	.max_div = 15,
-	.base = &virt_bases[LPASS_BASE],
-	.c = {
-		.dbg_name = "audio_core_lpaif_codec_spkr_ibit_clk",
-		.parent = &lpaif_spkr_clk_src.c,
-		.ops = &clk_ops_branch,
-		CLK_INIT(audio_core_lpaif_codec_spkr_ibit_clk.c),
-	},
-};
-
-static struct branch_clk audio_core_lpaif_codec_spkr_osr_clk = {
-	.cbcr_reg = AUDIO_CORE_LPAIF_CODEC_SPKR_OSR_CBCR,
-	.has_sibling = 1,
-	.base = &virt_bases[LPASS_BASE],
-	.c = {
-		.dbg_name = "audio_core_lpaif_codec_spkr_osr_clk",
-		.parent = &lpaif_spkr_clk_src.c,
-		.ops = &clk_ops_branch,
-		CLK_INIT(audio_core_lpaif_codec_spkr_osr_clk.c),
-	},
-};
-
-static struct branch_clk audio_core_lpaif_ter_ebit_clk = {
-	.cbcr_reg = AUDIO_CORE_LPAIF_TER_EBIT_CBCR,
-	.has_sibling = 0,
-	.base = &virt_bases[LPASS_BASE],
-	.c = {
-		.dbg_name = "audio_core_lpaif_ter_ebit_clk",
-		.ops = &clk_ops_branch,
-		CLK_INIT(audio_core_lpaif_ter_ebit_clk.c),
-	},
-};
-
-static struct branch_clk audio_core_lpaif_ter_ibit_clk = {
-	.cbcr_reg = AUDIO_CORE_LPAIF_TER_IBIT_CBCR,
-	.has_sibling = 1,
-	.max_div = 15,
-	.base = &virt_bases[LPASS_BASE],
-	.c = {
-		.dbg_name = "audio_core_lpaif_ter_ibit_clk",
-		.parent = &lpaif_ter_clk_src.c,
-		.ops = &clk_ops_branch,
-		CLK_INIT(audio_core_lpaif_ter_ibit_clk.c),
-	},
-};
-
-static struct branch_clk audio_core_lpaif_ter_osr_clk = {
-	.cbcr_reg = AUDIO_CORE_LPAIF_TER_OSR_CBCR,
-	.has_sibling = 1,
-	.base = &virt_bases[LPASS_BASE],
-	.c = {
-		.dbg_name = "audio_core_lpaif_ter_osr_clk",
-		.parent = &lpaif_ter_clk_src.c,
-		.ops = &clk_ops_branch,
-		CLK_INIT(audio_core_lpaif_ter_osr_clk.c),
-	},
-};
-
 static struct measure_mux_entry measure_mux_LPASS[] = {
-	{ &lpaif_pcmoe_clk_src.c,  LPASS_BASE, 0x000f },
-	{ &slimbus_clk_src.c,  LPASS_BASE, 0x0011 },
-	{ &lpaif_pcm1_clk_src.c,  LPASS_BASE, 0x0012 },
-	{ &lpaif_pcm0_clk_src.c,  LPASS_BASE, 0x0013 },
-	{ &lpaif_quad_clk_src.c,  LPASS_BASE, 0x0014 },
-	{ &lpaif_ter_clk_src.c,  LPASS_BASE, 0x0015 },
-	{ &lpaif_sec_clk_src.c,  LPASS_BASE, 0x0016 },
-	{ &lpaif_pri_clk_src.c,  LPASS_BASE, 0x0017 },
-	{ &lpaif_spkr_clk_src.c,  LPASS_BASE, 0x0018 },
 	{ &q6ss_ahbm_clk.c,  LPASS_BASE, 0x001d },
 	{ &q6ss_ahb_lfabif_clk.c,  LPASS_BASE, 0x001e },
-	{ &audio_wrapper_br_clk.c,  LPASS_BASE, 0x0022 },
 	{ &q6ss_xo_clk.c,  LPASS_BASE, 0x002b },
-	{ &audio_core_lpaif_pcmoe_clk.c,  LPASS_BASE, 0x0030 },
-	{ &audio_core_slimbus_core_clk.c,  LPASS_BASE, 0x003d },
-	{ &audio_core_slimbus_lfabif_clk.c,  LPASS_BASE, 0x003e },
-	{ &audio_core_ixfabric_clk.c,  LPASS_BASE, 0x0059 },
 	{&dummy_clk, N_BASES, 0x0000},
 };
 
@@ -3637,37 +3063,37 @@
 	CLK_LOOKUP("ocmem_a_clk", ocmemgx_msmbus_a_clk.c, "msm_bus"),
 	CLK_LOOKUP("bus_clk",	mmss_s0_axi_clk.c,	"msm_mmss_noc"),
 	CLK_LOOKUP("bus_a_clk",	mmss_s0_axi_clk.c,	"msm_mmss_noc"),
-	CLK_LOOKUP("iface_clk", gcc_mmss_noc_cfg_ahb_clk.c, ""),
 
-	CLK_LOOKUP("core_clk", qdss_clk.c, "coresight-tmc-etr"),
-	CLK_LOOKUP("core_clk", qdss_clk.c, "coresight-tpiu"),
-	CLK_LOOKUP("core_clk", qdss_clk.c, "coresight-replicator"),
-	CLK_LOOKUP("core_clk", qdss_clk.c, "coresight-tmc-etf"),
-	CLK_LOOKUP("core_clk", qdss_clk.c, "coresight-funnel-merg"),
-	CLK_LOOKUP("core_clk", qdss_clk.c, "coresight-funnel-in0"),
-	CLK_LOOKUP("core_clk", qdss_clk.c, "coresight-funnel-in1"),
-	CLK_LOOKUP("core_clk", qdss_clk.c, "coresight-funnel-kpss"),
-	CLK_LOOKUP("core_clk", qdss_clk.c, "coresight-funnel-mmss"),
-	CLK_LOOKUP("core_clk", qdss_clk.c, "coresight-stm"),
-	CLK_LOOKUP("core_clk", qdss_clk.c, "coresight-etm0"),
-	CLK_LOOKUP("core_clk", qdss_clk.c, "coresight-etm1"),
-	CLK_LOOKUP("core_clk", qdss_clk.c, "coresight-etm2"),
-	CLK_LOOKUP("core_clk", qdss_clk.c, "coresight-etm3"),
+	/* CoreSight clocks */
+	CLK_LOOKUP("core_clk", qdss_clk.c, "fc322000.tmc"),
+	CLK_LOOKUP("core_clk", qdss_clk.c, "fc318000.tpiu"),
+	CLK_LOOKUP("core_clk", qdss_clk.c, "fc31c000.replicator"),
+	CLK_LOOKUP("core_clk", qdss_clk.c, "fc307000.tmc"),
+	CLK_LOOKUP("core_clk", qdss_clk.c, "fc31b000.funnel"),
+	CLK_LOOKUP("core_clk", qdss_clk.c, "fc319000.funnel"),
+	CLK_LOOKUP("core_clk", qdss_clk.c, "fc31a000.funnel"),
+	CLK_LOOKUP("core_clk", qdss_clk.c, "fc345000.funnel"),
+	CLK_LOOKUP("core_clk", qdss_clk.c, "fc364000.funnel"),
+	CLK_LOOKUP("core_clk", qdss_clk.c, "fc321000.stm"),
+	CLK_LOOKUP("core_clk", qdss_clk.c, "fc33c000.etm"),
+	CLK_LOOKUP("core_clk", qdss_clk.c, "fc33d000.etm"),
+	CLK_LOOKUP("core_clk", qdss_clk.c, "fc33e000.etm"),
+	CLK_LOOKUP("core_clk", qdss_clk.c, "fc33f000.etm"),
 
-	CLK_LOOKUP("core_a_clk", qdss_a_clk.c, "coresight-tmc-etr"),
-	CLK_LOOKUP("core_a_clk", qdss_a_clk.c, "coresight-tpiu"),
-	CLK_LOOKUP("core_a_clk", qdss_a_clk.c, "coresight-replicator"),
-	CLK_LOOKUP("core_a_clk", qdss_a_clk.c, "coresight-tmc-etf"),
-	CLK_LOOKUP("core_a_clk", qdss_a_clk.c, "coresight-funnel-merg"),
-	CLK_LOOKUP("core_a_clk", qdss_a_clk.c, "coresight-funnel-in0"),
-	CLK_LOOKUP("core_a_clk", qdss_a_clk.c, "coresight-funnel-in1"),
-	CLK_LOOKUP("core_a_clk", qdss_a_clk.c, "coresight-funnel-kpss"),
-	CLK_LOOKUP("core_a_clk", qdss_a_clk.c, "coresight-funnel-mmss"),
-	CLK_LOOKUP("core_a_clk", qdss_a_clk.c, "coresight-stm"),
-	CLK_LOOKUP("core_a_clk", qdss_a_clk.c, "coresight-etm0"),
-	CLK_LOOKUP("core_a_clk", qdss_a_clk.c, "coresight-etm1"),
-	CLK_LOOKUP("core_a_clk", qdss_a_clk.c, "coresight-etm2"),
-	CLK_LOOKUP("core_a_clk", qdss_a_clk.c, "coresight-etm3"),
+	CLK_LOOKUP("core_a_clk", qdss_a_clk.c, "fc322000.tmc"),
+	CLK_LOOKUP("core_a_clk", qdss_a_clk.c, "fc318000.tpiu"),
+	CLK_LOOKUP("core_a_clk", qdss_a_clk.c, "fc31c000.replicator"),
+	CLK_LOOKUP("core_a_clk", qdss_a_clk.c, "fc307000.tmc"),
+	CLK_LOOKUP("core_a_clk", qdss_a_clk.c, "fc31b000.funnel"),
+	CLK_LOOKUP("core_a_clk", qdss_a_clk.c, "fc319000.funnel"),
+	CLK_LOOKUP("core_a_clk", qdss_a_clk.c, "fc31a000.funnel"),
+	CLK_LOOKUP("core_a_clk", qdss_a_clk.c, "fc345000.funnel"),
+	CLK_LOOKUP("core_a_clk", qdss_a_clk.c, "fc364000.funnel"),
+	CLK_LOOKUP("core_a_clk", qdss_a_clk.c, "fc321000.stm"),
+	CLK_LOOKUP("core_a_clk", qdss_a_clk.c, "fc33c000.etm"),
+	CLK_LOOKUP("core_a_clk", qdss_a_clk.c, "fc33d000.etm"),
+	CLK_LOOKUP("core_a_clk", qdss_a_clk.c, "fc33e000.etm"),
+	CLK_LOOKUP("core_a_clk", qdss_a_clk.c, "fc33f000.etm"),
 
 	/* HSUSB-OTG Clocks */
 	CLK_LOOKUP("xo",                          xo.c, "f9a55000.usb"),
@@ -3691,8 +3117,8 @@
 	CLK_LOOKUP("iface_clk",       gcc_blsp1_ahb_clk.c, "f995e000.serial"),
 	CLK_LOOKUP("core_clk", gcc_blsp1_uart2_apps_clk.c, "f995e000.serial"),
 
-	CLK_LOOKUP("iface_clk",          gcc_blsp1_ahb_clk.c, "f9928000.spi"),
-	CLK_LOOKUP("core_clk", gcc_blsp1_qup1_spi_apps_clk.c, "f9928000.spi"),
+	CLK_LOOKUP("iface_clk",          gcc_blsp1_ahb_clk.c, "f9923000.spi"),
+	CLK_LOOKUP("core_clk", gcc_blsp1_qup1_spi_apps_clk.c, "f9923000.spi"),
 
 	CLK_LOOKUP("core_clk",     gcc_ce1_clk.c,         "qseecom"),
 	CLK_LOOKUP("iface_clk",    gcc_ce1_ahb_clk.c,     "qseecom"),
@@ -3733,8 +3159,6 @@
 	CLK_LOOKUP("gpll1", gpll1.c, ""),
 	CLK_LOOKUP("mmpll0", mmpll0_pll.c, ""),
 	CLK_LOOKUP("mmpll1", mmpll1_pll.c, ""),
-	CLK_LOOKUP("mmpll2", mmpll2_pll.c, ""),
-	CLK_LOOKUP("lpaaudio_pll", lpaaudio_pll.c, ""),
 
 	CLK_LOOKUP("core_clk", gcc_blsp1_qup1_i2c_apps_clk.c, ""),
 	CLK_LOOKUP("core_clk", gcc_blsp1_qup2_i2c_apps_clk.c, ""),
@@ -3890,7 +3314,6 @@
 	CLK_LOOKUP("core_clk", oxilicx_axi_clk.c, "fdb10000.qcom,iommu"),
 
 	CLK_LOOKUP("core_clk", ocmemgx_core_clk.c, "fdd00000.qcom,ocmem"),
-	CLK_LOOKUP("br_clk", audio_wrapper_br_clk.c, "fdd00000.qcom,ocmem"),
 
 	/* Venus Clocks */
 	CLK_LOOKUP("core_clk", venus0_vcodec0_clk.c, "fdc00000.qcom,vidc"),
@@ -3906,69 +3329,9 @@
 	CLK_LOOKUP("cam_gp1_clk", camss_gp1_clk.c, ""),
 	CLK_LOOKUP("iface_clk", camss_micro_ahb_clk.c, ""),
 
-	CLK_LOOKUP("", mmss_mmssnoc_ahb_clk.c, ""),
 	CLK_LOOKUP("", mmss_mmssnoc_bto_ahb_clk.c, ""),
 	CLK_LOOKUP("", mmss_mmssnoc_axi_clk.c, ""),
 	CLK_LOOKUP("", mmss_s0_axi_clk.c, ""),
-	CLK_LOOKUP("", ocmemcx_ahb_clk.c, ""),
-
-	/* LPASS CLOCKS */
-	CLK_LOOKUP("core_clk", audio_core_slimbus_core_clk.c, "fe12f000.slim"),
-	CLK_LOOKUP("iface_clk", audio_core_slimbus_lfabif_clk.c,
-			"fe12f000.slim"),
-
-	CLK_LOOKUP("core_clk", lpaif_quad_clk_src.c,
-			"msm-dai-q6-mi2s.3"),
-	CLK_LOOKUP("osr_clk", audio_core_lpaif_quad_osr_clk.c,
-			"msm-dai-q6-mi2s.3"),
-	CLK_LOOKUP("ebit_clk", audio_core_lpaif_quad_ebit_clk.c,
-			"msm-dai-q6-mi2s.3"),
-	CLK_LOOKUP("ibit_clk", audio_core_lpaif_quad_ibit_clk.c,
-			"msm-dai-q6-mi2s.3"),
-
-	CLK_LOOKUP("pcm_clk", lpaif_pcm0_clk_src.c,
-						"msm-dai-q6.4106"),
-	CLK_LOOKUP("ibit_clk", audio_core_lpaif_pcm0_ibit_clk.c,
-						"msm-dai-q6.4106"),
-	CLK_LOOKUP("core_oe_src_clk", lpaif_pcmoe_clk_src.c,
-						"msm-dai-q6.4106"),
-	CLK_LOOKUP("core_oe_clk", audio_core_lpaif_pcmoe_clk.c,
-						"msm-dai-q6.4106"),
-
-	CLK_LOOKUP("pcm_clk", lpaif_pcm0_clk_src.c,
-						"msm-dai-q6.4107"),
-	CLK_LOOKUP("ibit_clk", audio_core_lpaif_pcm0_ibit_clk.c,
-						"msm-dai-q6.4107"),
-	CLK_LOOKUP("core_oe_src_clk", lpaif_pcmoe_clk_src.c,
-						"msm-dai-q6.4107"),
-	CLK_LOOKUP("core_oe_clk", audio_core_lpaif_pcmoe_clk.c,
-						"msm-dai-q6.4107"),
-
-
-	CLK_LOOKUP("bus_clk", audio_core_ixfabric_clk.c, ""),
-	CLK_LOOKUP("ebit_clk", audio_core_lpaif_pcm0_ebit_clk.c, ""),
-	CLK_LOOKUP("core_clk", lpaif_pcm1_clk_src.c, ""),
-	CLK_LOOKUP("ebit_clk", audio_core_lpaif_pcm1_ebit_clk.c, ""),
-	CLK_LOOKUP("ibit_clk", audio_core_lpaif_pcm1_ibit_clk.c, ""),
-	CLK_LOOKUP("core_clk", lpaif_spkr_clk_src.c, ""),
-	CLK_LOOKUP("osr_clk", audio_core_lpaif_codec_spkr_osr_clk.c, ""),
-	CLK_LOOKUP("ebit_clk", audio_core_lpaif_codec_spkr_ebit_clk.c, ""),
-	CLK_LOOKUP("ibit_clk", audio_core_lpaif_codec_spkr_ibit_clk.c, ""),
-	CLK_LOOKUP("core_clk", lpaif_pri_clk_src.c, ""),
-	CLK_LOOKUP("osr_clk", audio_core_lpaif_pri_osr_clk.c, ""),
-
-	CLK_LOOKUP("ebit_clk", audio_core_lpaif_pri_ebit_clk.c, ""),
-	CLK_LOOKUP("ibit_clk", audio_core_lpaif_pri_ibit_clk.c, ""),
-	CLK_LOOKUP("core_clk", lpaif_sec_clk_src.c, ""),
-
-	CLK_LOOKUP("osr_clk", audio_core_lpaif_sec_osr_clk.c, ""),
-	CLK_LOOKUP("ebit_clk", audio_core_lpaif_sec_ebit_clk.c, ""),
-	CLK_LOOKUP("ibit_clk", audio_core_lpaif_sec_ibit_clk.c, ""),
-
-	CLK_LOOKUP("core_clk", lpaif_ter_clk_src.c, ""),
-	CLK_LOOKUP("osr_clk", audio_core_lpaif_ter_osr_clk.c, ""),
-	CLK_LOOKUP("ebit_clk", audio_core_lpaif_ter_ebit_clk.c, ""),
-	CLK_LOOKUP("ibit_clk", audio_core_lpaif_ter_ibit_clk.c, ""),
 };
 
 static struct clk_lookup msm_clocks_8226_rumi[] = {
@@ -4091,32 +3454,6 @@
 	.main_output_mask = BIT(0),
 };
 
-static struct pll_config_regs lpapll0_regs __initdata = {
-	.l_reg = (void __iomem *)LPAAUDIO_PLL_L,
-	.m_reg = (void __iomem *)LPAAUDIO_PLL_M,
-	.n_reg = (void __iomem *)LPAAUDIO_PLL_N,
-	.config_reg = (void __iomem *)LPAAUDIO_PLL_USER_CTL,
-	.mode_reg = (void __iomem *)LPAAUDIO_PLL_MODE,
-	.base = &virt_bases[LPASS_BASE],
-};
-
-/* LPAPLL0 at 491.52 MHz, main output enabled. */
-static struct pll_config lpapll0_config __initdata = {
-	.l = 0x33,
-	.m = 0x1,
-	.n = 0x5,
-	.vco_val = 0x0,
-	.vco_mask = BM(21, 20),
-	.pre_div_val = BVAL(14, 12, 0x1),
-	.pre_div_mask = BM(14, 12),
-	.post_div_val = 0x0,
-	.post_div_mask = BM(9, 8),
-	.mn_ena_val = BIT(24),
-	.mn_ena_mask = BIT(24),
-	.main_output_val = BIT(0),
-	.main_output_mask = BIT(0),
-};
-
 #define PLL_AUX_OUTPUT_BIT 1
 #define PLL_AUX2_OUTPUT_BIT 2
 
@@ -4142,8 +3479,7 @@
 
 static void __init reg_init(void)
 {
-	u32 regval, status;
-	int ret;
+	u32 regval;
 
 	if (!(readl_relaxed(GCC_REG_BASE(GPLL0_STATUS))
 			& gpll0.status_mask))
@@ -4155,7 +3491,6 @@
 
 	configure_sr_hpm_lp_pll(&mmpll0_config, &mmpll0_regs, 1);
 	configure_sr_hpm_lp_pll(&mmpll1_config, &mmpll1_regs, 1);
-	configure_sr_hpm_lp_pll(&lpapll0_config, &lpapll0_regs, 1);
 
 	/* Enable GPLL0's aux outputs. */
 	regval = readl_relaxed(GCC_REG_BASE(GPLL0_USER_CTL));
@@ -4172,32 +3507,8 @@
 	 * register.
 	 */
 	writel_relaxed(0x0, GCC_REG_BASE(APCS_CLOCK_SLEEP_ENA_VOTE));
-
-	/*
-	 * TODO: The following sequence enables the LPASS audio core GDSC.
-	 * Remove when this becomes unnecessary.
-	 */
-
-	/*
-	 * Disable HW trigger: collapse/restore occur based on registers writes
-	 * Disable SW override: Use hardware state-machine for sequencing.
-	 */
-	regval = readl_relaxed(LPASS_REG_BASE(AUDIO_CORE_GDSCR));
-	regval &= ~(HW_CONTROL_MASK | SW_OVERRIDE_MASK);
-
-	/* Configure wait time between states. */
-	regval &= ~(EN_REST_WAIT_MASK | EN_FEW_WAIT_MASK | CLK_DIS_WAIT_MASK);
-	regval |= EN_REST_WAIT_VAL | EN_FEW_WAIT_VAL | CLK_DIS_WAIT_VAL;
-	writel_relaxed(regval, LPASS_REG_BASE(AUDIO_CORE_GDSCR));
-
-	regval = readl_relaxed(LPASS_REG_BASE(AUDIO_CORE_GDSCR));
-	regval &= ~BIT(0);
-	writel_relaxed(regval, LPASS_REG_BASE(AUDIO_CORE_GDSCR));
-
-	ret = readl_poll_timeout(LPASS_REG_BASE(AUDIO_CORE_GDSCR), status,
-				status & PWR_ON_MASK, 50, GDSC_TIMEOUT_US);
-	WARN(ret, "LPASS Audio Core GDSC did not power on.\n");
 }
+
 static void __init msm8226_clock_post_init(void)
 {
 
@@ -4216,8 +3527,6 @@
 	clk_set_rate(&mclk1_clk_src.c, mclk1_clk_src.freq_tbl[0].freq_hz);
 	clk_set_rate(&esc0_clk_src.c, esc0_clk_src.freq_tbl[0].freq_hz);
 	clk_set_rate(&vsync_clk_src.c, vsync_clk_src.freq_tbl[0].freq_hz);
-	clk_set_rate(&slimbus_clk_src.c,
-			slimbus_clk_src.freq_tbl[0].freq_hz);
 }
 
 #define GCC_CC_PHYS		0xFC400000
@@ -4311,13 +3620,6 @@
 	clk_set_rate(&axi_clk_src.c, 200000000);
 	clk_prepare_enable(&mmss_s0_axi_clk.c);
 
-
-	/* TODO: Delete this code once bootloaders enable this clk
-	 * Temporarily enable a clock to allow access to LPASS core
-	 * registers. Possibly requires gdsc to be enabled.
-	 */
-	clk_prepare_enable(&audio_core_ixfabric_clk.c);
-
 	/*
 	 * TODO: Enable the gcc_bimc_clk smcbc, which is the parent of the
 	 * mss_gcc_q6_bimc_axi_clk
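
The CoreSight lookups above now key on device-tree unit addresses rather than fixed platform-device names: a node such as tmc@fc322000 probes as a platform device named "fc322000.tmc", which is what clk_get() matches against the CLK_LOOKUP dev_id. A minimal consumer-side sketch, assuming only the standard clk API; the probe function below is illustrative, not the actual CoreSight driver:

	#include <linux/clk.h>
	#include <linux/err.h>
	#include <linux/platform_device.h>

	/* Illustrative probe: for the DT node tmc@fc322000, dev_name() is
	 * "fc322000.tmc", so this resolves through the lookups above. */
	static int example_coresight_probe(struct platform_device *pdev)
	{
		struct clk *core = clk_get(&pdev->dev, "core_clk");

		if (IS_ERR(core))
			return PTR_ERR(core);

		return clk_prepare_enable(core);
	}
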
diff --git a/arch/arm/mach-msm/clock-8610.c b/arch/arm/mach-msm/clock-8610.c
index 55427b5..5690730 100644
--- a/arch/arm/mach-msm/clock-8610.c
+++ b/arch/arm/mach-msm/clock-8610.c
@@ -3097,10 +3097,10 @@
 	CLK_LOOKUP("core_clk_src",          sdcc1_apps_clk_src.c, ""),
 	CLK_LOOKUP("core_clk_src",          sdcc2_apps_clk_src.c, ""),
 	CLK_LOOKUP("core_clk_src",       usb_hs_system_clk_src.c, ""),
-
 	CLK_LOOKUP("iface_clk",           gcc_blsp1_ahb_clk.c, "f9925000.i2c"),
+	CLK_LOOKUP("iface_clk",           gcc_blsp1_ahb_clk.c, "f9923000.spi"),
 	CLK_LOOKUP("core_clk",  gcc_blsp1_qup1_i2c_apps_clk.c, ""),
-	CLK_LOOKUP("core_clk",  gcc_blsp1_qup1_spi_apps_clk.c, ""),
+	CLK_LOOKUP("core_clk",  gcc_blsp1_qup1_spi_apps_clk.c, "f9923000.spi"),
 	CLK_LOOKUP("core_clk",  gcc_blsp1_qup2_i2c_apps_clk.c, ""),
 	CLK_LOOKUP("core_clk",  gcc_blsp1_qup2_spi_apps_clk.c, ""),
 	CLK_LOOKUP("core_clk",  gcc_blsp1_qup3_i2c_apps_clk.c, "f9925000.i2c"),
diff --git a/arch/arm/mach-msm/clock-8974.c b/arch/arm/mach-msm/clock-8974.c
index d26b4b2..4cef377 100644
--- a/arch/arm/mach-msm/clock-8974.c
+++ b/arch/arm/mach-msm/clock-8974.c
@@ -809,6 +809,7 @@
 static DEFINE_CLK_BRANCH_VOTER(cxo_wlan_clk, &cxo_clk_src.c);
 static DEFINE_CLK_BRANCH_VOTER(cxo_pil_pronto_clk, &cxo_clk_src.c);
 static DEFINE_CLK_BRANCH_VOTER(cxo_dwc3_clk, &cxo_clk_src.c);
+static DEFINE_CLK_BRANCH_VOTER(cxo_ehci_host_clk, &cxo_clk_src.c);
 
 static struct clk_freq_tbl ftbl_gcc_usb30_master_clk[] = {
 	F(125000000,  gpll0,   1,   5,  24),
@@ -3008,7 +3009,9 @@
 	},
 };
 
+static struct branch_clk mdss_ahb_clk;
 static struct clk dsipll0_byte_clk_src = {
+	.depends = &mdss_ahb_clk.c,
 	.parent = &cxo_clk_src.c,
 	.dbg_name = "dsipll0_byte_clk_src",
 	.ops = &clk_ops_dsi_byte_pll,
@@ -3016,6 +3019,7 @@
 };
 
 static struct clk dsipll0_pixel_clk_src = {
+	.depends = &mdss_ahb_clk.c,
 	.parent = &cxo_clk_src.c,
 	.dbg_name = "dsipll0_pixel_clk_src",
 	.ops = &clk_ops_dsi_pixel_pll,
@@ -3034,11 +3038,40 @@
 static struct clk_ops clk_ops_pixel;
 
 #define CFG_RCGR_DIV_MASK		BM(4, 0)
+#define CMD_RCGR_REG(x)			(*(x)->base + (x)->cmd_rcgr_reg + 0x0)
+#define CFG_RCGR_REG(x)			(*(x)->base + (x)->cmd_rcgr_reg + 0x4)
+#define M_REG(x)			(*(x)->base + (x)->cmd_rcgr_reg + 0x8)
+#define N_REG(x)			(*(x)->base + (x)->cmd_rcgr_reg + 0xC)
+#define MND_MODE_MASK			BM(13, 12)
+#define MND_DUAL_EDGE_MODE_BVAL		BVAL(13, 12, 0x2)
+#define CFG_RCGR_SRC_SEL_MASK		BM(10, 8)
+#define CMD_RCGR_ROOT_STATUS_BIT	BIT(31)
+
+static enum handoff byte_rcg_handoff(struct clk *clk)
+{
+	struct rcg_clk *rcg = to_rcg_clk(clk);
+	u32 div_val;
+	unsigned long pre_div_rate, parent_rate = clk_get_rate(clk->parent);
+
+	/* If the pre-divider is used, find the rate after the division */
+	div_val = readl_relaxed(CFG_RCGR_REG(rcg)) & CFG_RCGR_DIV_MASK;
+	if (div_val > 1)
+		pre_div_rate = parent_rate / ((div_val + 1) >> 1);
+	else
+		pre_div_rate = parent_rate;
+
+	clk->rate = pre_div_rate;
+
+	if (readl_relaxed(CMD_RCGR_REG(rcg)) & CMD_RCGR_ROOT_STATUS_BIT)
+		return HANDOFF_DISABLED_CLK;
+
+	return HANDOFF_ENABLED_CLK;
+}
 
 static int set_rate_byte(struct clk *clk, unsigned long rate)
 {
 	struct rcg_clk *rcg = to_rcg_clk(clk);
-	struct clk *pll = &dsipll0_byte_clk_src;
+	struct clk *pll = clk->parent;
 	unsigned long source_rate, div;
 	int rc;
 
@@ -3059,15 +3092,48 @@
 
 	byte_freq.div_src_val &= ~CFG_RCGR_DIV_MASK;
 	byte_freq.div_src_val |= BVAL(4, 0, div);
-	set_rate_mnd(rcg, &byte_freq);
+	set_rate_hid(rcg, &byte_freq);
 
 	return 0;
 }
 
+static enum handoff pixel_rcg_handoff(struct clk *clk)
+{
+	struct rcg_clk *rcg = to_rcg_clk(clk);
+	u32 div_val, mval, nval, cfg_regval;
+	unsigned long pre_div_rate, parent_rate = clk_get_rate(clk->parent);
+
+	cfg_regval = readl_relaxed(CFG_RCGR_REG(rcg));
+
+	/* If the pre-divider is used, find the rate after the division */
+	div_val = cfg_regval & CFG_RCGR_DIV_MASK;
+	if (div_val > 1)
+		pre_div_rate = parent_rate / ((div_val + 1) >> 1);
+	else
+		pre_div_rate = parent_rate;
+
+	clk->rate = pre_div_rate;
+
+	/* If MND is used, find the rate after the MND division */
+	if ((cfg_regval & MND_MODE_MASK) == MND_DUAL_EDGE_MODE_BVAL) {
+		mval = readl_relaxed(M_REG(rcg));
+		nval = readl_relaxed(N_REG(rcg));
+		if (!nval)
+			return HANDOFF_DISABLED_CLK;
+		nval = (~nval) + mval;
+		clk->rate = (pre_div_rate * mval) / nval;
+	}
+
+	if (readl_relaxed(CMD_RCGR_REG(rcg)) & CMD_RCGR_ROOT_STATUS_BIT)
+		return HANDOFF_DISABLED_CLK;
+
+	return HANDOFF_ENABLED_CLK;
+}
+
 static int set_rate_pixel(struct clk *clk, unsigned long rate)
 {
 	struct rcg_clk *rcg = to_rcg_clk(clk);
-	struct clk *pll = &dsipll0_pixel_clk_src;
+	struct clk *pll = clk->parent;
 	unsigned long source_rate, div;
 	int rc;
 
@@ -3088,7 +3154,7 @@
 
 	pixel_freq.div_src_val &= ~CFG_RCGR_DIV_MASK;
 	pixel_freq.div_src_val |= BVAL(4, 0, div);
-	set_rate_hid(rcg, &pixel_freq);
+	set_rate_mnd(rcg, &pixel_freq);
 
 	return 0;
 }
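
For reference, the decode done by the new byte_rcg_handoff()/pixel_rcg_handoff() helpers is the inverse of the encode used by the F(...)/F_LPASS(...) frequency-table macros elsewhere in this series: CFG[4:0] carries 2*div - 1 for the pre-divider and the N register carries ~(n - m). A standalone sketch (user-space C, 8-bit register widths assumed for illustration) reproducing the 12.288 MHz LPASS table entry:

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		unsigned long parent_rate = 491520000;	/* LPAPLL0 rate */
		uint8_t div_field = 2 * 10 - 1;		/* F_LPASS div = 10 */
		uint8_t m_reg = 1;			/* F_LPASS m = 1 */
		uint8_t n_reg = (uint8_t)~(4 - 1);	/* F_LPASS n = 4 -> ~(n - m) */
		unsigned long pre_div_rate, rate;
		unsigned int nval;

		/* Handoff decode: (div_field + 1) >> 1 recovers the divider */
		pre_div_rate = div_field > 1 ?
				parent_rate / ((div_field + 1) >> 1) : parent_rate;

		/* MND decode, as in pixel_rcg_handoff(): n = ~N + m */
		nval = (uint8_t)~n_reg + m_reg;
		rate = pre_div_rate * m_reg / nval;

		/* Prints 49152000 and 12288000, matching
		 * F_LPASS(12288000, ..., 10, 1, 4) */
		printf("%lu %lu\n", pre_div_rate, rate);
		return 0;
	}
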
@@ -4828,6 +4894,7 @@
 	CLK_LOOKUP("xo",       cxo_wlan_clk.c, "fb000000.qcom,wcnss-wlan"),
 	CLK_LOOKUP("xo", cxo_pil_pronto_clk.c,     "fb21b000.qcom,pronto"),
 	CLK_LOOKUP("xo",       cxo_dwc3_clk.c,                 "msm_dwc3"),
+	CLK_LOOKUP("xo",  cxo_ehci_host_clk.c,            "msm_ehci_host"),
 
 	CLK_LOOKUP("measure",	measure_clk.c,	"debug"),
 
@@ -4968,9 +5035,11 @@
 
 	/* MM sensor clocks */
 	CLK_LOOKUP("cam_src_clk", mclk0_clk_src.c, "6e.qcom,camera"),
+	CLK_LOOKUP("cam_src_clk", mclk0_clk_src.c, "20.qcom,camera"),
 	CLK_LOOKUP("cam_src_clk", mclk2_clk_src.c, "6c.qcom,camera"),
 	CLK_LOOKUP("cam_src_clk", mclk1_clk_src.c, "90.qcom,camera"),
 	CLK_LOOKUP("cam_clk", camss_mclk0_clk.c, "6e.qcom,camera"),
+	CLK_LOOKUP("cam_clk", camss_mclk0_clk.c, "20.qcom,camera"),
 	CLK_LOOKUP("cam_clk", camss_mclk2_clk.c, "6c.qcom,camera"),
 	CLK_LOOKUP("cam_clk", camss_mclk1_clk.c, "90.qcom,camera"),
 	CLK_LOOKUP("cam_clk", camss_mclk1_clk.c, ""),
@@ -5227,35 +5296,64 @@
 	CLK_LOOKUP("iface_clk", gcc_mmss_noc_cfg_ahb_clk.c, ""),
 	CLK_LOOKUP("iface_clk", gcc_ocmem_noc_cfg_ahb_clk.c, ""),
 
-	CLK_LOOKUP("core_clk", qdss_clk.c, "coresight-tmc-etr"),
-	CLK_LOOKUP("core_clk", qdss_clk.c, "coresight-tpiu"),
-	CLK_LOOKUP("core_clk", qdss_clk.c, "coresight-replicator"),
-	CLK_LOOKUP("core_clk", qdss_clk.c, "coresight-tmc-etf"),
-	CLK_LOOKUP("core_clk", qdss_clk.c, "coresight-funnel-merg"),
-	CLK_LOOKUP("core_clk", qdss_clk.c, "coresight-funnel-in0"),
-	CLK_LOOKUP("core_clk", qdss_clk.c, "coresight-funnel-in1"),
-	CLK_LOOKUP("core_clk", qdss_clk.c, "coresight-funnel-kpss"),
-	CLK_LOOKUP("core_clk", qdss_clk.c, "coresight-funnel-mmss"),
-	CLK_LOOKUP("core_clk", qdss_clk.c, "coresight-stm"),
-	CLK_LOOKUP("core_clk", qdss_clk.c, "coresight-etm0"),
-	CLK_LOOKUP("core_clk", qdss_clk.c, "coresight-etm1"),
-	CLK_LOOKUP("core_clk", qdss_clk.c, "coresight-etm2"),
-	CLK_LOOKUP("core_clk", qdss_clk.c, "coresight-etm3"),
+	/* CoreSight clocks */
+	CLK_LOOKUP("core_clk", qdss_clk.c, "fc322000.tmc"),
+	CLK_LOOKUP("core_clk", qdss_clk.c, "fc318000.tpiu"),
+	CLK_LOOKUP("core_clk", qdss_clk.c, "fc31c000.replicator"),
+	CLK_LOOKUP("core_clk", qdss_clk.c, "fc307000.tmc"),
+	CLK_LOOKUP("core_clk", qdss_clk.c, "fc31b000.funnel"),
+	CLK_LOOKUP("core_clk", qdss_clk.c, "fc319000.funnel"),
+	CLK_LOOKUP("core_clk", qdss_clk.c, "fc31a000.funnel"),
+	CLK_LOOKUP("core_clk", qdss_clk.c, "fc345000.funnel"),
+	CLK_LOOKUP("core_clk", qdss_clk.c, "fc364000.funnel"),
+	CLK_LOOKUP("core_clk", qdss_clk.c, "fc321000.stm"),
+	CLK_LOOKUP("core_clk", qdss_clk.c, "fc33c000.etm"),
+	CLK_LOOKUP("core_clk", qdss_clk.c, "fc33d000.etm"),
+	CLK_LOOKUP("core_clk", qdss_clk.c, "fc33e000.etm"),
+	CLK_LOOKUP("core_clk", qdss_clk.c, "fc33f000.etm"),
+	CLK_LOOKUP("core_clk", qdss_clk.c, "fc308000.cti"),
+	CLK_LOOKUP("core_clk", qdss_clk.c, "fc309000.cti"),
+	CLK_LOOKUP("core_clk", qdss_clk.c, "fc30a000.cti"),
+	CLK_LOOKUP("core_clk", qdss_clk.c, "fc30b000.cti"),
+	CLK_LOOKUP("core_clk", qdss_clk.c, "fc30c000.cti"),
+	CLK_LOOKUP("core_clk", qdss_clk.c, "fc30d000.cti"),
+	CLK_LOOKUP("core_clk", qdss_clk.c, "fc30e000.cti"),
+	CLK_LOOKUP("core_clk", qdss_clk.c, "fc30f000.cti"),
+	CLK_LOOKUP("core_clk", qdss_clk.c, "fc310000.cti"),
+	CLK_LOOKUP("core_clk", qdss_clk.c, "fc340000.cti"),
+	CLK_LOOKUP("core_clk", qdss_clk.c, "fc341000.cti"),
+	CLK_LOOKUP("core_clk", qdss_clk.c, "fc342000.cti"),
+	CLK_LOOKUP("core_clk", qdss_clk.c, "fc343000.cti"),
+	CLK_LOOKUP("core_clk", qdss_clk.c, "fc344000.cti"),
 
-	CLK_LOOKUP("core_a_clk", qdss_a_clk.c, "coresight-tmc-etr"),
-	CLK_LOOKUP("core_a_clk", qdss_a_clk.c, "coresight-tpiu"),
-	CLK_LOOKUP("core_a_clk", qdss_a_clk.c, "coresight-replicator"),
-	CLK_LOOKUP("core_a_clk", qdss_a_clk.c, "coresight-tmc-etf"),
-	CLK_LOOKUP("core_a_clk", qdss_a_clk.c, "coresight-funnel-merg"),
-	CLK_LOOKUP("core_a_clk", qdss_a_clk.c, "coresight-funnel-in0"),
-	CLK_LOOKUP("core_a_clk", qdss_a_clk.c, "coresight-funnel-in1"),
-	CLK_LOOKUP("core_a_clk", qdss_a_clk.c, "coresight-funnel-kpss"),
-	CLK_LOOKUP("core_a_clk", qdss_a_clk.c, "coresight-funnel-mmss"),
-	CLK_LOOKUP("core_a_clk", qdss_a_clk.c, "coresight-stm"),
-	CLK_LOOKUP("core_a_clk", qdss_a_clk.c, "coresight-etm0"),
-	CLK_LOOKUP("core_a_clk", qdss_a_clk.c, "coresight-etm1"),
-	CLK_LOOKUP("core_a_clk", qdss_a_clk.c, "coresight-etm2"),
-	CLK_LOOKUP("core_a_clk", qdss_a_clk.c, "coresight-etm3"),
+	CLK_LOOKUP("core_a_clk", qdss_a_clk.c, "fc322000.tmc"),
+	CLK_LOOKUP("core_a_clk", qdss_a_clk.c, "fc318000.tpiu"),
+	CLK_LOOKUP("core_a_clk", qdss_a_clk.c, "fc31c000.replicator"),
+	CLK_LOOKUP("core_a_clk", qdss_a_clk.c, "fc307000.tmc"),
+	CLK_LOOKUP("core_a_clk", qdss_a_clk.c, "fc31b000.funnel"),
+	CLK_LOOKUP("core_a_clk", qdss_a_clk.c, "fc319000.funnel"),
+	CLK_LOOKUP("core_a_clk", qdss_a_clk.c, "fc31a000.funnel"),
+	CLK_LOOKUP("core_a_clk", qdss_a_clk.c, "fc345000.funnel"),
+	CLK_LOOKUP("core_a_clk", qdss_a_clk.c, "fc364000.funnel"),
+	CLK_LOOKUP("core_a_clk", qdss_a_clk.c, "fc321000.stm"),
+	CLK_LOOKUP("core_a_clk", qdss_a_clk.c, "fc33c000.etm"),
+	CLK_LOOKUP("core_a_clk", qdss_a_clk.c, "fc33d000.etm"),
+	CLK_LOOKUP("core_a_clk", qdss_a_clk.c, "fc33e000.etm"),
+	CLK_LOOKUP("core_a_clk", qdss_a_clk.c, "fc33f000.etm"),
+	CLK_LOOKUP("core_a_clk", qdss_a_clk.c, "fc308000.cti"),
+	CLK_LOOKUP("core_a_clk", qdss_a_clk.c, "fc309000.cti"),
+	CLK_LOOKUP("core_a_clk", qdss_a_clk.c, "fc30a000.cti"),
+	CLK_LOOKUP("core_a_clk", qdss_a_clk.c, "fc30b000.cti"),
+	CLK_LOOKUP("core_a_clk", qdss_a_clk.c, "fc30c000.cti"),
+	CLK_LOOKUP("core_a_clk", qdss_a_clk.c, "fc30d000.cti"),
+	CLK_LOOKUP("core_a_clk", qdss_a_clk.c, "fc30e000.cti"),
+	CLK_LOOKUP("core_a_clk", qdss_a_clk.c, "fc30f000.cti"),
+	CLK_LOOKUP("core_a_clk", qdss_a_clk.c, "fc310000.cti"),
+	CLK_LOOKUP("core_a_clk", qdss_a_clk.c, "fc340000.cti"),
+	CLK_LOOKUP("core_a_clk", qdss_a_clk.c, "fc341000.cti"),
+	CLK_LOOKUP("core_a_clk", qdss_a_clk.c, "fc342000.cti"),
+	CLK_LOOKUP("core_a_clk", qdss_a_clk.c, "fc343000.cti"),
+	CLK_LOOKUP("core_a_clk", qdss_a_clk.c, "fc344000.cti"),
 
 	CLK_LOOKUP("l2_m_clk",		l2_m_clk,     ""),
 	CLK_LOOKUP("krait0_m_clk",	krait0_m_clk, ""),
@@ -5429,16 +5527,24 @@
 
 static void __init mdss_clock_setup(void)
 {
-	clk_ops_byte = clk_ops_rcg_mnd;
+	clk_ops_byte = clk_ops_rcg;
 	clk_ops_byte.set_rate = set_rate_byte;
+	clk_ops_byte.handoff = byte_rcg_handoff;
+	clk_ops_byte.get_parent = NULL;
 
-	clk_ops_pixel = clk_ops_rcg;
+	clk_ops_pixel = clk_ops_rcg_mnd;
 	clk_ops_pixel.set_rate = set_rate_pixel;
+	clk_ops_pixel.handoff = pixel_rcg_handoff;
+	clk_ops_pixel.get_parent = NULL;
 
 	clk_ops_rcg_hdmi = clk_ops_rcg;
 	clk_ops_rcg_hdmi.set_rate = rcg_clk_set_rate_hdmi;
 
-	mdss_clk_ctrl_init();
+	/*
+	 * MDSS needs the AHB clock and must be initialized before we register
+	 * the clock lookup table.
+	 */
+	mdss_clk_ctrl_pre_init(&mdss_ahb_clk.c);
 }
 
 static void __init msm8974_clock_post_init(void)
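
The ops wiring above now matches the set_rate paths: the byte RCG uses the plain (HID) RCG ops with set_rate_byte/byte_rcg_handoff, while the pixel RCG uses the MND ops with set_rate_pixel/pixel_rcg_handoff. Giving dsipll0_byte_clk_src and dsipll0_pixel_clk_src a .depends on mdss_ahb_clk is what lets the MDSS PLL code drop its own clk_get_sys() of the DSI iface clock. A standalone model of the intended ordering (assuming the clock framework prepares/enables .depends ahead of the clock itself; plain user-space C, names illustrative, not the msm clock framework):

	#include <stdio.h>

	struct xclk {
		const char *name;
		struct xclk *depends;
		int count;
	};

	static void xclk_enable(struct xclk *c)
	{
		if (c->depends)
			xclk_enable(c->depends);	/* dependency first */
		if (c->count++ == 0)
			printf("enable %s\n", c->name);
	}

	int main(void)
	{
		struct xclk ahb  = { "mdss_ahb_clk", NULL, 0 };
		struct xclk byte = { "dsipll0_byte_clk_src", &ahb, 0 };

		/* Prints "enable mdss_ahb_clk" before the byte source */
		xclk_enable(&byte);
		return 0;
	}
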
@@ -5471,8 +5577,6 @@
 	clk_prepare_enable(&gcc_mmss_noc_cfg_ahb_clk.c);
 	clk_prepare_enable(&gcc_ocmem_noc_cfg_ahb_clk.c);
 
-	mdss_clock_setup();
-
 	/* Set rates for single-rate clocks. */
 	clk_set_rate(&usb30_master_clk_src.c,
 			usb30_master_clk_src.freq_tbl[0].freq_hz);
@@ -5588,6 +5692,8 @@
 		for (i = 0; i < ARRAY_SIZE(qup_i2c_clks); i++)
 			qup_i2c_clks[i][0]->parent =  qup_i2c_clks[i][1];
 	}
+
+	mdss_clock_setup();
 }
 
 static int __init msm8974_clock_late_init(void)
diff --git a/arch/arm/mach-msm/clock-9625.c b/arch/arm/mach-msm/clock-9625.c
index b5f5a4e..9648320 100644
--- a/arch/arm/mach-msm/clock-9625.c
+++ b/arch/arm/mach-msm/clock-9625.c
@@ -33,7 +33,6 @@
 
 enum {
 	GCC_BASE,
-	LPASS_BASE,
 	APCS_BASE,
 	APCS_PLL_BASE,
 	N_BASES,
@@ -42,7 +41,6 @@
 static void __iomem *virt_bases[N_BASES];
 
 #define GCC_REG_BASE(x) (void __iomem *)(virt_bases[GCC_BASE] + (x))
-#define LPASS_REG_BASE(x) (void __iomem *)(virt_bases[LPASS_BASE] + (x))
 #define APCS_REG_BASE(x) (void __iomem *)(virt_bases[APCS_BASE] + (x))
 #define APCS_PLL_REG_BASE(x) (void __iomem *)(virt_bases[APCS_PLL_BASE] + (x))
 
@@ -203,54 +201,11 @@
 #define IPA_CNOC_CBCR                            0x1A88
 #define IPA_SLEEP_CBCR                           0x1A8C
 
-/* LPASS registers */
-/* TODO: Needs to double check lpass regiserts after get the SWI for hw */
-#define LPAPLL_MODE_REG				0x0000
-#define LPAPLL_L_REG				0x0004
-#define LPAPLL_M_REG				0x0008
-#define LPAPLL_N_REG				0x000C
-#define LPAPLL_USER_CTL_REG			0x0010
-#define LPAPLL_CONFIG_CTL_REG			0x0014
-#define LPAPLL_TEST_CTL_REG			0x0018
-#define LPAPLL_STATUS_REG			0x001C
-
-#define LPASS_DEBUG_CLK_CTL_REG			0x29000
-#define LPASS_LPA_PLL_VOTE_APPS_REG		0x2000
-
-#define LPAIF_PRI_CMD_RCGR			0xB000
-#define LPAIF_SEC_CMD_RCGR			0xC000
-#define LPAIF_PCM0_CMD_RCGR			0xF000
-#define LPAIF_PCM1_CMD_RCGR			0x10000
-#define SLIMBUS_CMD_RCGR			0x12000
-#define LPAIF_PCMOE_CMD_RCGR			0x13000
-
-#define AUDIO_CORE_BCR				0x4000
-
-#define AUDIO_CORE_GDSCR			0x7000
-#define AUDIO_CORE_LPAIF_PRI_OSR_CBCR		0xB014
-#define AUDIO_CORE_LPAIF_PRI_IBIT_CBCR		0xB018
-#define AUDIO_CORE_LPAIF_PRI_EBIT_CBCR		0xB01C
-#define AUDIO_CORE_LPAIF_SEC_OSR_CBCR		0xC014
-#define AUDIO_CORE_LPAIF_SEC_IBIT_CBCR		0xC018
-#define AUDIO_CORE_LPAIF_SEC_EBIT_CBCR		0xC01C
-#define AUDIO_CORE_LPAIF_PCM0_IBIT_CBCR		0xF014
-#define AUDIO_CORE_LPAIF_PCM0_EBIT_CBCR		0xF018
-#define AUDIO_CORE_LPAIF_PCM1_IBIT_CBCR		0x10014
-#define AUDIO_CORE_LPAIF_PCM1_EBIT_CBCR		0x10018
-#define AUDIO_CORE_RESAMPLER_CORE_CBCR		0x11014
-#define AUDIO_CORE_RESAMPLER_LFABIF_CBCR	0x11018
-#define AUDIO_CORE_SLIMBUS_CORE_CBCR		0x12014
-#define AUDIO_CORE_SLIMBUS_LFABIF_CBCR		0x12018
-#define AUDIO_CORE_LPAIF_PCM_DATA_OE_CBCR	0x13014
-
 /* Mux source select values */
 #define cxo_source_val	0
 #define gpll0_source_val 1
 #define gpll1_hsic_source_val 4
 #define gnd_source_val	5
-#define cxo_lpass_source_val 0
-#define lpapll0_lpass_source_val 1
-#define gpll0_lpass_source_val 5
 
 #define F_GCC_GND \
 	{ \
@@ -282,17 +237,6 @@
 			| BVAL(10, 8, s##_hsic_source_val), \
 	}
 
-#define F_LPASS(f, s, div, m, n) \
-	{ \
-		.freq_hz = (f), \
-		.src_clk = &s##_clk_src.c, \
-		.m_val = (m), \
-		.n_val = ~((n)-(m)) * !!(n), \
-		.d_val = ~(n),\
-		.div_src_val = BVAL(4, 0, (int)(2*(div) - 1)) \
-			| BVAL(10, 8, s##_lpass_source_val), \
-	}
-
 #define F_APCS_PLL(f, l, m, n, pre_div, post_div, vco) \
 	{ \
 		.freq_hz = (f), \
@@ -429,21 +373,6 @@
 	},
 };
 
-static struct pll_vote_clk lpapll0_clk_src = {
-	.en_reg = (void __iomem *)LPASS_LPA_PLL_VOTE_APPS_REG,
-	.en_mask = BIT(0),
-	.status_reg = (void __iomem *)LPAPLL_STATUS_REG,
-	.status_mask = BIT(17),
-	.base = &virt_bases[LPASS_BASE],
-	.c = {
-		.parent = &cxo_clk_src.c,
-		.rate = 393216000,
-		.dbg_name = "lpapll0_clk_src",
-		.ops = &clk_ops_pll_vote,
-		CLK_INIT(lpapll0_clk_src.c),
-	},
-};
-
 static struct pll_vote_clk gpll1_clk_src = {
 	.en_reg = (void __iomem *)APCS_GPLL_ENA_VOTE_REG,
 	.en_mask = BIT(1),
@@ -1619,274 +1548,6 @@
 	},
 };
 
-/* LPASS clock data */
-static struct clk_freq_tbl ftbl_audio_core_lpaif_clock[] = {
-	F_LPASS(  512000,   lpapll0,   16,   1,   48),
-	F_LPASS(  768000,   lpapll0,   16,   1,   32),
-	F_LPASS( 1024000,   lpapll0,   16,   1,   24),
-	F_LPASS( 1536000,   lpapll0,   16,   1,   16),
-	F_LPASS( 2048000,   lpapll0,   16,   1,   12),
-	F_LPASS( 3072000,   lpapll0,   16,   1,    8),
-	F_LPASS( 4096000,   lpapll0,   16,   1,    6),
-	F_LPASS( 6144000,   lpapll0,   16,   1,    4),
-	F_LPASS( 8192000,   lpapll0,   16,   1,    3),
-	F_LPASS(12288000,   lpapll0,   16,   1,    2),
-	F_END
-};
-
-static struct clk_freq_tbl ftbl_audio_core_lpaif_pcm_clock[] = {
-	F_LPASS(  512000,   lpapll0,   16,   1,   48),
-	F_LPASS(  768000,   lpapll0,   16,   1,   32),
-	F_LPASS( 1024000,   lpapll0,   16,   1,   24),
-	F_LPASS( 1536000,   lpapll0,   16,   1,   16),
-	F_LPASS( 2048000,   lpapll0,   16,   1,   12),
-	F_LPASS( 3072000,   lpapll0,   16,   1,    8),
-	F_LPASS( 4096000,   lpapll0,   16,   1,    6),
-	F_LPASS( 6144000,   lpapll0,   16,   1,    4),
-	F_LPASS( 8192000,   lpapll0,   16,   1,    3),
-	F_END
-};
-
-static struct rcg_clk audio_core_lpaif_pcmoe_clk_src = {
-	.cmd_rcgr_reg =  LPAIF_PCMOE_CMD_RCGR,
-	.set_rate = set_rate_mnd,
-	.freq_tbl = ftbl_audio_core_lpaif_clock,
-	.current_freq = &rcg_dummy_freq,
-	.base = &virt_bases[LPASS_BASE],
-	.c = {
-		.dbg_name = "audio_core_lpaif_pcmoe_clk_src",
-		.ops = &clk_ops_rcg_mnd,
-		VDD_DIG_FMAX_MAP1(LOW, 12288000),
-		CLK_INIT(audio_core_lpaif_pcmoe_clk_src.c)
-	},
-};
-
-static struct rcg_clk audio_core_lpaif_pri_clk_src = {
-	.cmd_rcgr_reg =  LPAIF_PRI_CMD_RCGR,
-	.set_rate = set_rate_mnd,
-	.freq_tbl = ftbl_audio_core_lpaif_clock,
-	.current_freq = &rcg_dummy_freq,
-	.base = &virt_bases[LPASS_BASE],
-	.c = {
-		.dbg_name = "audio_core_lpaif_pri_clk_src",
-		.ops = &clk_ops_rcg_mnd,
-		VDD_DIG_FMAX_MAP2(LOW, 12288000, NOMINAL, 24576000),
-		CLK_INIT(audio_core_lpaif_pri_clk_src.c)
-	},
-};
-
-static struct rcg_clk audio_core_lpaif_sec_clk_src = {
-	.cmd_rcgr_reg =  LPAIF_SEC_CMD_RCGR,
-	.set_rate = set_rate_mnd,
-	.freq_tbl = ftbl_audio_core_lpaif_clock,
-	.current_freq = &rcg_dummy_freq,
-	.base = &virt_bases[LPASS_BASE],
-	.c = {
-		.dbg_name = "audio_core_lpaif_sec_clk_src",
-		.ops = &clk_ops_rcg_mnd,
-		VDD_DIG_FMAX_MAP2(LOW, 12288000, NOMINAL, 24576000),
-		CLK_INIT(audio_core_lpaif_sec_clk_src.c)
-	},
-};
-
-static struct clk_freq_tbl ftbl_audio_core_slimbus_core_clock[] = {
-	F_LPASS(26041000,   lpapll0,   1,   10,   151),
-	F_END
-};
-
-static struct rcg_clk audio_core_slimbus_core_clk_src = {
-	.cmd_rcgr_reg =  SLIMBUS_CMD_RCGR,
-	.set_rate = set_rate_mnd,
-	.freq_tbl = ftbl_audio_core_slimbus_core_clock,
-	.current_freq = &rcg_dummy_freq,
-	.base = &virt_bases[LPASS_BASE],
-	.c = {
-		.dbg_name = "audio_core_slimbus_core_clk_src",
-		.ops = &clk_ops_rcg_mnd,
-		VDD_DIG_FMAX_MAP2(LOW, 13107000, NOMINAL, 26214000),
-		CLK_INIT(audio_core_slimbus_core_clk_src.c)
-	},
-};
-
-static struct rcg_clk audio_core_lpaif_pcm0_clk_src = {
-	.cmd_rcgr_reg =  LPAIF_PCM0_CMD_RCGR,
-	.set_rate = set_rate_mnd,
-	.freq_tbl = ftbl_audio_core_lpaif_pcm_clock,
-	.current_freq = &rcg_dummy_freq,
-	.base = &virt_bases[LPASS_BASE],
-	.c = {
-		.dbg_name = "audio_core_lpaif_pcm0_clk_src",
-		.ops = &clk_ops_rcg_mnd,
-		VDD_DIG_FMAX_MAP2(LOW, 4096000, NOMINAL, 8192000),
-		CLK_INIT(audio_core_lpaif_pcm0_clk_src.c)
-	},
-};
-
-static struct rcg_clk audio_core_lpaif_pcm1_clk_src = {
-	.cmd_rcgr_reg =  LPAIF_PCM1_CMD_RCGR,
-	.set_rate = set_rate_mnd,
-	.freq_tbl = ftbl_audio_core_lpaif_pcm_clock,
-	.current_freq = &rcg_dummy_freq,
-	.base = &virt_bases[LPASS_BASE],
-	.c = {
-		.dbg_name = "audio_core_lpaif_pcm1_clk_src",
-		.ops = &clk_ops_rcg_mnd,
-		VDD_DIG_FMAX_MAP2(LOW, 4096000, NOMINAL, 8192000),
-		CLK_INIT(audio_core_lpaif_pcm1_clk_src.c)
-	},
-};
-
-static struct branch_clk audio_core_slimbus_lfabif_clk = {
-	.cbcr_reg = AUDIO_CORE_SLIMBUS_LFABIF_CBCR,
-	.has_sibling = 1,
-	.base = &virt_bases[LPASS_BASE],
-	.c = {
-		.dbg_name = "audio_core_slimbus_lfabif_clk",
-		.ops = &clk_ops_branch,
-		CLK_INIT(audio_core_slimbus_lfabif_clk.c),
-	},
-};
-
-static struct branch_clk audio_core_lpaif_pcm_data_oe_clk = {
-	.cbcr_reg = AUDIO_CORE_LPAIF_PCM_DATA_OE_CBCR,
-	.base = &virt_bases[LPASS_BASE],
-	.c = {
-		.parent = &audio_core_lpaif_pcmoe_clk_src.c,
-		.dbg_name = "audio_core_lpaif_pcm_data_oe_clk",
-		.ops = &clk_ops_branch,
-		CLK_INIT(audio_core_lpaif_pcm_data_oe_clk.c),
-	},
-};
-
-static struct branch_clk audio_core_slimbus_core_clk = {
-	.cbcr_reg = AUDIO_CORE_SLIMBUS_CORE_CBCR,
-	.base = &virt_bases[LPASS_BASE],
-	.c = {
-		.parent = &audio_core_slimbus_core_clk_src.c,
-		.dbg_name = "audio_core_slimbus_core_clk",
-		.ops = &clk_ops_branch,
-		CLK_INIT(audio_core_slimbus_core_clk.c),
-	},
-};
-
-static struct branch_clk audio_core_lpaif_pri_ebit_clk = {
-	.cbcr_reg = AUDIO_CORE_LPAIF_PRI_EBIT_CBCR,
-	.has_sibling = 0,
-	.base = &virt_bases[LPASS_BASE],
-	.c = {
-		.dbg_name = "audio_core_lpaif_pri_ebit_clk",
-		.ops = &clk_ops_branch,
-		CLK_INIT(audio_core_lpaif_pri_ebit_clk.c),
-	},
-};
-
-static struct branch_clk audio_core_lpaif_pri_ibit_clk = {
-	.cbcr_reg = AUDIO_CORE_LPAIF_PRI_IBIT_CBCR,
-	.has_sibling = 1,
-	.max_div = 15,
-	.base = &virt_bases[LPASS_BASE],
-	.c = {
-		.parent = &audio_core_lpaif_pri_clk_src.c,
-		.dbg_name = "audio_core_lpaif_pri_ibit_clk",
-		.ops = &clk_ops_branch,
-		CLK_INIT(audio_core_lpaif_pri_ibit_clk.c),
-	},
-};
-
-static struct branch_clk audio_core_lpaif_pri_osr_clk = {
-	.cbcr_reg = AUDIO_CORE_LPAIF_PRI_OSR_CBCR,
-	.has_sibling = 1,
-	.base = &virt_bases[LPASS_BASE],
-	.c = {
-		.parent = &audio_core_lpaif_pri_clk_src.c,
-		.dbg_name = "audio_core_lpaif_pri_osr_clk",
-		.ops = &clk_ops_branch,
-		CLK_INIT(audio_core_lpaif_pri_osr_clk.c),
-	},
-};
-
-static struct branch_clk audio_core_lpaif_pcm0_ebit_clk = {
-	.cbcr_reg = AUDIO_CORE_LPAIF_PCM0_EBIT_CBCR,
-	.has_sibling = 0,
-	.base = &virt_bases[LPASS_BASE],
-	.c = {
-		.dbg_name = "audio_core_lpaif_pcm0_ebit_clk",
-		.ops = &clk_ops_branch,
-		CLK_INIT(audio_core_lpaif_pcm0_ebit_clk.c),
-	},
-};
-
-static struct branch_clk audio_core_lpaif_pcm0_ibit_clk = {
-	.cbcr_reg = AUDIO_CORE_LPAIF_PCM0_IBIT_CBCR,
-	.has_sibling = 0,
-	.base = &virt_bases[LPASS_BASE],
-	.c = {
-		.parent = &audio_core_lpaif_pcm0_clk_src.c,
-		.dbg_name = "audio_core_lpaif_pcm0_ibit_clk",
-		.ops = &clk_ops_branch,
-		CLK_INIT(audio_core_lpaif_pcm0_ibit_clk.c),
-	},
-};
-
-static struct branch_clk audio_core_lpaif_sec_ebit_clk = {
-	.cbcr_reg = AUDIO_CORE_LPAIF_SEC_EBIT_CBCR,
-	.has_sibling = 0,
-	.base = &virt_bases[LPASS_BASE],
-	.c = {
-		.dbg_name = "audio_core_lpaif_sec_ebit_clk",
-		.ops = &clk_ops_branch,
-		CLK_INIT(audio_core_lpaif_sec_ebit_clk.c),
-	},
-};
-
-static struct branch_clk audio_core_lpaif_sec_ibit_clk = {
-	.cbcr_reg = AUDIO_CORE_LPAIF_SEC_IBIT_CBCR,
-	.has_sibling = 1,
-	.max_div = 15,
-	.base = &virt_bases[LPASS_BASE],
-	.c = {
-		.parent = &audio_core_lpaif_sec_clk_src.c,
-		.dbg_name = "audio_core_lpaif_sec_ibit_clk",
-		.ops = &clk_ops_branch,
-		CLK_INIT(audio_core_lpaif_sec_ibit_clk.c),
-	},
-};
-
-static struct branch_clk audio_core_lpaif_sec_osr_clk = {
-	.cbcr_reg = AUDIO_CORE_LPAIF_SEC_OSR_CBCR,
-	.has_sibling = 1,
-	.base = &virt_bases[LPASS_BASE],
-	.c = {
-		.parent = &audio_core_lpaif_sec_clk_src.c,
-		.dbg_name = "audio_core_lpaif_sec_osr_clk",
-		.ops = &clk_ops_branch,
-		CLK_INIT(audio_core_lpaif_sec_osr_clk.c),
-	},
-};
-
-static struct branch_clk audio_core_lpaif_pcm1_ebit_clk = {
-	.cbcr_reg = AUDIO_CORE_LPAIF_PCM1_EBIT_CBCR,
-	.has_sibling = 0,
-	.base = &virt_bases[LPASS_BASE],
-	.c = {
-		.dbg_name = "audio_core_lpaif_pcm1_ebit_clk",
-		.ops = &clk_ops_branch,
-		CLK_INIT(audio_core_lpaif_pcm1_ebit_clk.c),
-	},
-};
-
-static struct branch_clk audio_core_lpaif_pcm1_ibit_clk = {
-	.cbcr_reg = AUDIO_CORE_LPAIF_PCM1_IBIT_CBCR,
-	.has_sibling = 0,
-	.base = &virt_bases[LPASS_BASE],
-	.c = {
-		.parent = &audio_core_lpaif_pcm1_clk_src.c,
-		.dbg_name = "audio_core_lpaif_pcm1_ibit_clk",
-		.ops = &clk_ops_branch,
-		CLK_INIT(audio_core_lpaif_pcm1_ibit_clk.c),
-	},
-};
-
 static DEFINE_CLK_MEASURE(a5_m_clk);
 
 #ifdef CONFIG_DEBUG_FS
@@ -1897,7 +1558,7 @@
 	u32 debug_mux;
 };
 
-struct measure_mux_entry measure_mux[] = {
+struct measure_mux_entry measure_mux_common[] __initdata = {
 	{&gcc_pdm_ahb_clk.c,			GCC_BASE, 0x00d0},
 	{&gcc_usb_hsic_xcvr_fs_clk.c,		GCC_BASE, 0x005d},
 	{&gcc_usb_hsic_system_clk.c,		GCC_BASE, 0x0059},
@@ -1943,21 +1604,22 @@
 	{&gcc_qpic_clk.c,			GCC_BASE, 0x01D8},
 	{&gcc_qpic_ahb_clk.c,			GCC_BASE, 0x01D9},
 
-	{&audio_core_lpaif_pcm_data_oe_clk.c,	LPASS_BASE, 0x0030},
-	{&audio_core_slimbus_core_clk.c,	LPASS_BASE, 0x003d},
-	{&audio_core_lpaif_pri_clk_src.c,	LPASS_BASE, 0x0017},
-	{&audio_core_lpaif_sec_clk_src.c,	LPASS_BASE, 0x0016},
-	{&audio_core_slimbus_core_clk_src.c,	LPASS_BASE, 0x0011},
-	{&audio_core_lpaif_pcm1_clk_src.c,	LPASS_BASE, 0x0012},
-	{&audio_core_lpaif_pcm0_clk_src.c,	LPASS_BASE, 0x0013},
-	{&audio_core_lpaif_pcmoe_clk_src.c,	LPASS_BASE, 0x000f},
-	{&audio_core_slimbus_lfabif_clk.c,	LPASS_BASE, 0x003e},
-
 	{&a5_m_clk,				APCS_BASE, 0x3},
 
 	{&dummy_clk,				N_BASES,    0x0000},
 };
 
+struct measure_mux_entry measure_mux_v2_only[] __initdata = {
+	{&gcc_ipa_clk.c,			GCC_BASE, 0x01E0},
+	{&gcc_ipa_cnoc_clk.c,			GCC_BASE, 0x01E1},
+	{&gcc_ipa_sleep_clk.c,			GCC_BASE, 0x01E2},
+	{&gcc_qpic_clk.c,			GCC_BASE, 0x01D8},
+	{&gcc_qpic_ahb_clk.c,			GCC_BASE, 0x01D9},
+};
+
+struct measure_mux_entry measure_mux[ARRAY_SIZE(measure_mux_common)
+				+ ARRAY_SIZE(measure_mux_v2_only)];
+
 static int measure_clk_set_parent(struct clk *c, struct clk *parent)
 {
 	struct measure_clk *clk = to_measure_clk(c);
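
The debug mux table is now split into a common part and a v2-only part and merged at init time (see the memcpy() sequence added later in this file); the combined measure_mux[] is sized to hold both halves, and the common half still carries the {&dummy_clk, N_BASES, 0x0000} terminator. A standalone sketch of that merge order (user-space C; the integer entries stand in for the real measure_mux_entry contents):

	#include <stdio.h>
	#include <string.h>

	#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

	struct entry { int id; };	/* stand-in for measure_mux_entry */

	static const struct entry common[]  = { {1}, {2}, {0} };	/* 0 = terminator */
	static const struct entry v2_only[] = { {10}, {11} };
	static struct entry merged[ARRAY_SIZE(common) + ARRAY_SIZE(v2_only)];

	int main(void)
	{
		int is_v2 = 1;
		size_t i;

		if (is_v2) {
			/* v2 entries first, then the common set with terminator */
			memcpy(merged, v2_only, sizeof(v2_only));
			memcpy(merged + ARRAY_SIZE(v2_only), common, sizeof(common));
		} else {
			memcpy(merged, common, sizeof(common));
		}

		for (i = 0; i < ARRAY_SIZE(merged) && merged[i].id; i++)
			printf("%d\n", merged[i].id);
		return 0;
	}
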
@@ -1982,7 +1644,6 @@
 	clk->sample_ticks = 0x10000;
 	clk->multiplier = 1;
 
-	writel_relaxed(0, LPASS_REG_BASE(LPASS_DEBUG_CLK_CTL_REG));
 	writel_relaxed(0, GCC_REG_BASE(GCC_DEBUG_CLK_CTL_REG));
 
 	switch (measure_mux[i].base) {
@@ -1991,16 +1652,6 @@
 		clk_sel = measure_mux[i].debug_mux;
 		break;
 
-	case LPASS_BASE:
-		clk_sel = 0x161;
-		regval = BVAL(15, 0, measure_mux[i].debug_mux);
-		writel_relaxed(regval, LPASS_REG_BASE(LPASS_DEBUG_CLK_CTL_REG));
-
-		/* Activate debug clock output */
-		regval |= BIT(20);
-		writel_relaxed(regval, LPASS_REG_BASE(LPASS_DEBUG_CLK_CTL_REG));
-		break;
-
 	case APCS_BASE:
 		clk_sel = 0x16A;
 		regval = BVAL(5, 3, measure_mux[i].debug_mux);
@@ -2208,35 +1859,6 @@
 	CLK_LOOKUP("bus_clk", gcc_ce1_axi_clk.c, "fd400000.qcom,qcrypto"),
 	CLK_LOOKUP("core_clk_src", ce1_clk_src.c, "fd400000.qcom,qcrypto"),
 
-	/* LPASS clocks */
-	CLK_LOOKUP("core_clk", audio_core_slimbus_core_clk.c, "fe12f000.slim"),
-	CLK_LOOKUP("iface_clk", audio_core_slimbus_lfabif_clk.c, ""),
-
-	CLK_LOOKUP("core_clk", audio_core_lpaif_pri_clk_src.c,
-		   "msm-dai-q6-mi2s.0"),
-	CLK_LOOKUP("osr_clk", audio_core_lpaif_pri_osr_clk.c,
-		   "msm-dai-q6-mi2s.0"),
-	CLK_LOOKUP("ebit_clk", audio_core_lpaif_pri_ebit_clk.c,
-		   "msm-dai-q6-mi2s.0"),
-	CLK_LOOKUP("ibit_clk", audio_core_lpaif_pri_ibit_clk.c,
-		   "msm-dai-q6-mi2s.0"),
-	CLK_LOOKUP("core_clk", audio_core_lpaif_sec_clk_src.c,
-		   "msm-dai-q6-mi2s.1"),
-	CLK_LOOKUP("osr_clk", audio_core_lpaif_sec_osr_clk.c,
-		   "msm-dai-q6-mi2s.1"),
-	CLK_LOOKUP("ebit_clk", audio_core_lpaif_sec_ebit_clk.c,
-		   "msm-dai-q6-mi2s.1"),
-	CLK_LOOKUP("ibit_clk", audio_core_lpaif_sec_ibit_clk.c,
-		   "msm-dai-q6-mi2s.1"),
-	CLK_LOOKUP("core_clk", audio_core_lpaif_pcm0_clk_src.c, ""),
-	CLK_LOOKUP("ebit_clk", audio_core_lpaif_pcm0_ebit_clk.c, ""),
-	CLK_LOOKUP("ibit_clk", audio_core_lpaif_pcm0_ibit_clk.c, ""),
-	CLK_LOOKUP("core_clk", audio_core_lpaif_pcm1_clk_src.c, ""),
-	CLK_LOOKUP("ebit_clk", audio_core_lpaif_pcm1_ebit_clk.c, ""),
-	CLK_LOOKUP("ibit_clk", audio_core_lpaif_pcm1_ibit_clk.c, ""),
-	CLK_LOOKUP("core_oe_src_clk", audio_core_lpaif_pcmoe_clk_src.c, ""),
-	CLK_LOOKUP("core_oe_clk", audio_core_lpaif_pcm_data_oe_clk.c, ""),
-
 	/* RPM and voter clocks */
 	CLK_LOOKUP("bus_clk", snoc_clk.c, ""),
 	CLK_LOOKUP("bus_clk", pnoc_clk.c, ""),
@@ -2260,7 +1882,7 @@
 
 	CLK_LOOKUP("a5_m_clk", a5_m_clk, ""),
 
-	/* Coresight QDSS clocks */
+	/* CoreSight clocks */
 	CLK_LOOKUP("core_clk", qdss_clk.c, "fc322000.tmc"),
 	CLK_LOOKUP("core_clk", qdss_clk.c, "fc318000.tpiu"),
 	CLK_LOOKUP("core_clk", qdss_clk.c, "fc31c000.replicator"),
@@ -2271,96 +1893,40 @@
 	CLK_LOOKUP("core_clk", qdss_clk.c, "fc321000.stm"),
 	CLK_LOOKUP("core_clk", qdss_clk.c, "fc332000.etm"),
 	CLK_LOOKUP("core_clk", qdss_clk.c, "fc332000.jtagmm"),
+	CLK_LOOKUP("core_clk", qdss_clk.c, "fc308000.cti"),
+	CLK_LOOKUP("core_clk", qdss_clk.c, "fc309000.cti"),
+	CLK_LOOKUP("core_clk", qdss_clk.c, "fc30a000.cti"),
+	CLK_LOOKUP("core_clk", qdss_clk.c, "fc30b000.cti"),
+	CLK_LOOKUP("core_clk", qdss_clk.c, "fc30c000.cti"),
+	CLK_LOOKUP("core_clk", qdss_clk.c, "fc30d000.cti"),
+	CLK_LOOKUP("core_clk", qdss_clk.c, "fc30e000.cti"),
+	CLK_LOOKUP("core_clk", qdss_clk.c, "fc30f000.cti"),
+	CLK_LOOKUP("core_clk", qdss_clk.c, "fc310000.cti"),
+	CLK_LOOKUP("core_clk", qdss_clk.c, "fc333000.cti"),
 
-	CLK_LOOKUP("core_a_clk", qdss_clk.c, "fc322000.tmc"),
-	CLK_LOOKUP("core_a_clk", qdss_clk.c, "fc318000.tpiu"),
-	CLK_LOOKUP("core_a_clk", qdss_clk.c, "fc31c000.replicator"),
-	CLK_LOOKUP("core_a_clk", qdss_clk.c, "fc307000.tmc"),
-	CLK_LOOKUP("core_a_clk", qdss_clk.c, "fc31b000.funnel"),
-	CLK_LOOKUP("core_a_clk", qdss_clk.c, "fc319000.funnel"),
-	CLK_LOOKUP("core_a_clk", qdss_clk.c, "fc31a000.funnel"),
-	CLK_LOOKUP("core_a_clk", qdss_clk.c, "fc321000.stm"),
-	CLK_LOOKUP("core_a_clk", qdss_clk.c, "fc332000.etm"),
-	CLK_LOOKUP("core_a_clk", qdss_clk.c, "fc332000.jtagmm"),
+	CLK_LOOKUP("core_a_clk", qdss_a_clk.c, "fc322000.tmc"),
+	CLK_LOOKUP("core_a_clk", qdss_a_clk.c, "fc318000.tpiu"),
+	CLK_LOOKUP("core_a_clk", qdss_a_clk.c, "fc31c000.replicator"),
+	CLK_LOOKUP("core_a_clk", qdss_a_clk.c, "fc307000.tmc"),
+	CLK_LOOKUP("core_a_clk", qdss_a_clk.c, "fc31b000.funnel"),
+	CLK_LOOKUP("core_a_clk", qdss_a_clk.c, "fc319000.funnel"),
+	CLK_LOOKUP("core_a_clk", qdss_a_clk.c, "fc31a000.funnel"),
+	CLK_LOOKUP("core_a_clk", qdss_a_clk.c, "fc321000.stm"),
+	CLK_LOOKUP("core_a_clk", qdss_a_clk.c, "fc332000.etm"),
+	CLK_LOOKUP("core_a_clk", qdss_a_clk.c, "fc332000.jtagmm"),
+	CLK_LOOKUP("core_a_clk", qdss_a_clk.c, "fc308000.cti"),
+	CLK_LOOKUP("core_a_clk", qdss_a_clk.c, "fc309000.cti"),
+	CLK_LOOKUP("core_a_clk", qdss_a_clk.c, "fc30a000.cti"),
+	CLK_LOOKUP("core_a_clk", qdss_a_clk.c, "fc30b000.cti"),
+	CLK_LOOKUP("core_a_clk", qdss_a_clk.c, "fc30c000.cti"),
+	CLK_LOOKUP("core_a_clk", qdss_a_clk.c, "fc30d000.cti"),
+	CLK_LOOKUP("core_a_clk", qdss_a_clk.c, "fc30e000.cti"),
+	CLK_LOOKUP("core_a_clk", qdss_a_clk.c, "fc30f000.cti"),
+	CLK_LOOKUP("core_a_clk", qdss_a_clk.c, "fc310000.cti"),
+	CLK_LOOKUP("core_a_clk", qdss_a_clk.c, "fc333000.cti"),
 
 };
 
-static struct pll_config_regs gpll0_regs __initdata = {
-	.l_reg = (void __iomem *)GPLL0_L_REG,
-	.m_reg = (void __iomem *)GPLL0_M_REG,
-	.n_reg = (void __iomem *)GPLL0_N_REG,
-	.config_reg = (void __iomem *)GPLL0_USER_CTL_REG,
-	.mode_reg = (void __iomem *)GPLL0_MODE_REG,
-	.base = &virt_bases[GCC_BASE],
-};
-
-/* GPLL0 at 600 MHz, main output enabled. */
-static struct pll_config gpll0_config __initdata = {
-	.l = 0x1f,
-	.m = 0x1,
-	.n = 0x4,
-	.vco_val = 0x0,
-	.vco_mask = BM(21, 20),
-	.pre_div_val = 0x0,
-	.pre_div_mask = BM(14, 12),
-	.post_div_val = 0x0,
-	.post_div_mask = BM(9, 8),
-	.mn_ena_val = BIT(24),
-	.mn_ena_mask = BIT(24),
-	.main_output_val = BIT(0),
-	.main_output_mask = BIT(0),
-};
-
-static struct pll_config_regs gpll1_regs __initdata = {
-	.l_reg = (void __iomem *)GPLL1_L_REG,
-	.m_reg = (void __iomem *)GPLL1_M_REG,
-	.n_reg = (void __iomem *)GPLL1_N_REG,
-	.config_reg = (void __iomem *)GPLL1_USER_CTL_REG,
-	.mode_reg = (void __iomem *)GPLL1_MODE_REG,
-	.base = &virt_bases[GCC_BASE],
-};
-
-/* GPLL1 at 480 MHz, main output enabled. */
-static struct pll_config gpll1_config __initdata = {
-	.l = 0x19,
-	.m = 0x0,
-	.n = 0x1,
-	.vco_val = 0x0,
-	.vco_mask = BM(21, 20),
-	.pre_div_val = 0x0,
-	.pre_div_mask = BM(14, 12),
-	.post_div_val = 0x0,
-	.post_div_mask = BM(9, 8),
-	.main_output_val = BIT(0),
-	.main_output_mask = BIT(0),
-};
-
-static struct pll_config_regs lpapll0_regs __initdata = {
-	.l_reg = (void __iomem *)LPAPLL_L_REG,
-	.m_reg = (void __iomem *)LPAPLL_M_REG,
-	.n_reg = (void __iomem *)LPAPLL_N_REG,
-	.config_reg = (void __iomem *)LPAPLL_USER_CTL_REG,
-	.mode_reg = (void __iomem *)LPAPLL_MODE_REG,
-	.base = &virt_bases[LPASS_BASE],
-};
-
-/* LPAPLL0 at 393.216 MHz, main output enabled. */
-static struct pll_config lpapll0_config __initdata = {
-	.l = 0x28,
-	.m = 0x18,
-	.n = 0x19,
-	.vco_val = 0x0,
-	.vco_mask = BM(21, 20),
-	.pre_div_val = 0x0,
-	.pre_div_mask = BM(14, 12),
-	.post_div_val = BVAL(9, 8, 0x1),
-	.post_div_mask = BM(9, 8),
-	.mn_ena_val = BIT(24),
-	.mn_ena_mask = BIT(24),
-	.main_output_val = BIT(0),
-	.main_output_mask = BIT(0),
-};
-
 #define PLL_AUX_OUTPUT_BIT 1
 #define PLL_AUX2_OUTPUT_BIT 2
 
@@ -2402,64 +1968,9 @@
 	return 0;
 }
 
-static void __init configure_apcs_pll(void)
-{
-	u32 regval;
-
-	clk_set_rate(&apcspll_clk_src.c, 998400000);
-
-	writel_relaxed(0x00141200,
-			APCS_PLL_REG_BASE(APCS_CPU_PLL_CONFIG_CTL_REG));
-
-	/* Enable AUX and AUX2 output */
-	regval = readl_relaxed(APCS_PLL_REG_BASE(APCS_CPU_PLL_USER_CTL_REG));
-	regval |= BIT(PLL_AUX_OUTPUT_BIT) | BIT(PLL_AUX2_OUTPUT_BIT);
-	writel_relaxed(regval, APCS_PLL_REG_BASE(APCS_CPU_PLL_USER_CTL_REG));
-}
-
-#define PWR_ON_MASK		BIT(31)
-#define EN_REST_WAIT_MASK	(0xF << 20)
-#define EN_FEW_WAIT_MASK	(0xF << 16)
-#define CLK_DIS_WAIT_MASK	(0xF << 12)
-#define SW_OVERRIDE_MASK	BIT(2)
-#define HW_CONTROL_MASK		BIT(1)
-#define SW_COLLAPSE_MASK	BIT(0)
-
-/* Wait 2^n CXO cycles between all states. Here, n=2 (4 cycles). */
-#define EN_REST_WAIT_VAL	(0x2 << 20)
-#define EN_FEW_WAIT_VAL		(0x2 << 16)
-#define CLK_DIS_WAIT_VAL	(0x2 << 12)
-#define GDSC_TIMEOUT_US		50000
-
 static void __init reg_init(void)
 {
-	u32 regval, status;
-	int ret;
-
-	if (!(readl_relaxed(GCC_REG_BASE(GPLL0_STATUS_REG))
-			& gpll0_clk_src.status_mask))
-		configure_sr_hpm_lp_pll(&gpll0_config, &gpll0_regs, 1);
-
-	if (!(readl_relaxed(GCC_REG_BASE(GPLL1_STATUS_REG))
-			& gpll1_clk_src.status_mask))
-		configure_sr_hpm_lp_pll(&gpll1_config, &gpll1_regs, 1);
-
-	configure_sr_hpm_lp_pll(&lpapll0_config, &lpapll0_regs, 1);
-
-	/* TODO: Remove A5 pll configuration once the bootloader is avaiable */
-	regval = readl_relaxed(APCS_PLL_REG_BASE(APCS_CPU_PLL_MODE_REG));
-	if ((regval & BM(2, 0)) != 0x7)
-		configure_apcs_pll();
-
-	/* TODO:
-	 * 1) do we need to turn on AUX2 output too?
-	 * 2) if need to vote off all sleep clocks
-	 */
-
-	/* Enable GPLL0's aux outputs. */
-	regval = readl_relaxed(GCC_REG_BASE(GPLL0_USER_CTL_REG));
-	regval |= BIT(PLL_AUX_OUTPUT_BIT) | BIT(PLL_AUX2_OUTPUT_BIT);
-	writel_relaxed(regval, GCC_REG_BASE(GPLL0_USER_CTL_REG));
+	u32 regval;
 
 	/* Vote for GPLL0 to turn on. Needed by acpuclock. */
 	regval = readl_relaxed(GCC_REG_BASE(APCS_GPLL_ENA_VOTE_REG));
@@ -2471,31 +1982,6 @@
 	 * register.
 	 */
 	writel_relaxed(0x0, GCC_REG_BASE(APCS_CLOCK_SLEEP_ENA_VOTE));
-
-	/*
-	 * TODO: The following sequence enables the LPASS audio core GDSC.
-	 * Remove when this becomes unnecessary.
-	 */
-
-	/*
-	 * Disable HW trigger: collapse/restore occur based on registers writes.
-	 * Disable SW override: Use hardware state-machine for sequencing.
-	 */
-	regval = readl_relaxed(LPASS_REG_BASE(AUDIO_CORE_GDSCR));
-	regval &= ~(HW_CONTROL_MASK | SW_OVERRIDE_MASK);
-
-	/* Configure wait time between states. */
-	regval &= ~(EN_REST_WAIT_MASK | EN_FEW_WAIT_MASK | CLK_DIS_WAIT_MASK);
-	regval |= EN_REST_WAIT_VAL | EN_FEW_WAIT_VAL | CLK_DIS_WAIT_VAL;
-	writel_relaxed(regval, LPASS_REG_BASE(AUDIO_CORE_GDSCR));
-
-	regval = readl_relaxed(LPASS_REG_BASE(AUDIO_CORE_GDSCR));
-	regval &= ~BIT(0);
-	writel_relaxed(regval, LPASS_REG_BASE(AUDIO_CORE_GDSCR));
-
-	ret = readl_poll_timeout(LPASS_REG_BASE(AUDIO_CORE_GDSCR), status,
-				status & PWR_ON_MASK, 50, GDSC_TIMEOUT_US);
-	WARN(ret, "LPASS Audio Core GDSC did not power on.\n");
 }
 
 static void __init msm9625_clock_post_init(void)
@@ -2524,8 +2010,6 @@
 	clk_set_rate(&usb_hsic_xcvr_fs_clk_src.c,
 			usb_hsic_xcvr_fs_clk_src.freq_tbl[0].freq_hz);
 	clk_set_rate(&pdm2_clk_src.c, pdm2_clk_src.freq_tbl[0].freq_hz);
-	clk_set_rate(&audio_core_slimbus_core_clk_src.c,
-			audio_core_slimbus_core_clk_src.freq_tbl[0].freq_hz);
 	/*
 	 * TODO: set rate on behalf of the i2c driver until the i2c driver
 	 *	 distinguish v1/v2 and call set rate accordingly.
@@ -2538,9 +2022,6 @@
 #define GCC_CC_PHYS		0xFC400000
 #define GCC_CC_SIZE		SZ_16K
 
-#define LPASS_CC_PHYS		0xFE000000
-#define LPASS_CC_SIZE		SZ_256K
-
 #define APCS_GCC_CC_PHYS	0xF9011000
 #define APCS_GCC_CC_SIZE	SZ_4K
 
@@ -2562,10 +2043,6 @@
 	if (!virt_bases[GCC_BASE])
 		panic("clock-9625: Unable to ioremap GCC memory!");
 
-	virt_bases[LPASS_BASE] = ioremap(LPASS_CC_PHYS, LPASS_CC_SIZE);
-	if (!virt_bases[LPASS_BASE])
-		panic("clock-9625: Unable to ioremap LPASS_CC memory!");
-
 	virt_bases[APCS_BASE] = ioremap(APCS_GCC_CC_PHYS, APCS_GCC_CC_SIZE);
 	if (!virt_bases[APCS_BASE])
 		panic("clock-9625: Unable to ioremap APCS_GCC_CC memory!");
@@ -2593,6 +2070,16 @@
 	enable_rpm_scaling();
 
 	reg_init();
+
+	/* Construct measurement mux array */
+	if (SOCINFO_VERSION_MAJOR(socinfo_get_version()) == 2) {
+		memcpy(measure_mux,
+			measure_mux_v2_only, sizeof(measure_mux_v2_only));
+		memcpy(measure_mux + ARRAY_SIZE(measure_mux_v2_only),
+			measure_mux_common, sizeof(measure_mux_common));
+	} else
+		memcpy(measure_mux,
+			measure_mux_common, sizeof(measure_mux_common));
 }
 
 static int __init msm9625_clock_late_init(void)
diff --git a/arch/arm/mach-msm/clock-mdss-8974.c b/arch/arm/mach-msm/clock-mdss-8974.c
index 54fe11e..91e96b7 100644
--- a/arch/arm/mach-msm/clock-mdss-8974.c
+++ b/arch/arm/mach-msm/clock-mdss-8974.c
@@ -27,6 +27,9 @@
 #define REG_R(addr)		readl_relaxed(addr)
 #define REG_W(data, addr)	writel_relaxed(data, addr)
 
+#define GDSC_PHYS		0xFD8C2304
+#define GDSC_SIZE		0x4
+
 #define DSI_PHY_PHYS		0xFD922800
 #define DSI_PHY_SIZE		0x00000800
 
@@ -99,6 +102,7 @@
 
 #define VCO_CLK				424000000
 static unsigned char *mdss_dsi_base;
+static unsigned char *gdsc_base;
 static int pll_byte_clk_rate;
 static int pll_pclk_rate;
 static int pll_initialized;
@@ -109,20 +113,19 @@
 static void __iomem *hdmi_phy_pll_base;
 static unsigned hdmi_pll_on;
 
-void __init mdss_clk_ctrl_init(void)
+void __init mdss_clk_ctrl_pre_init(struct clk *ahb_clk)
 {
+	BUG_ON(ahb_clk == NULL);
+
+	gdsc_base = ioremap(GDSC_PHYS, GDSC_SIZE);
+	if (!gdsc_base)
+		pr_err("%s: unable to remap gdsc base", __func__);
+
 	mdss_dsi_base = ioremap(DSI_PHY_PHYS, DSI_PHY_SIZE);
 	if (!mdss_dsi_base)
 		pr_err("%s: unable to remap dsi base", __func__);
 
-	mdss_dsi_ahb_clk = clk_get_sys("mdss_dsi_clk_ctrl", "iface_clk");
-	if (!IS_ERR(mdss_dsi_ahb_clk)) {
-		clk_prepare(mdss_dsi_ahb_clk);
-	} else {
-		mdss_dsi_ahb_clk = NULL;
-		pr_err("%s:%d unable to get dsi iface clock\n",
-			       __func__, __LINE__);
-	}
+	mdss_dsi_ahb_clk = ahb_clk;
 
 	hdmi_phy_base = ioremap(HDMI_PHY_PHYS, HDMI_PHY_SIZE);
 	if (!hdmi_phy_base)
@@ -133,6 +136,38 @@
 		pr_err("%s: unable to ioremap hdmi phy pll base", __func__);
 }
 
+#define PLL_POLL_MAX_READS 10
+#define PLL_POLL_TIMEOUT_US 50
+
+static int mdss_gdsc_enabled(void)
+{
+	if (!gdsc_base)
+		return 0;
+
+	return !!(readl_relaxed(gdsc_base) & BIT(31));
+}
+
+static int mdss_dsi_check_pll_lock(void)
+{
+	u32 status;
+
+	clk_prepare_enable(mdss_dsi_ahb_clk);
+	/* poll for PLL ready status */
+	if (readl_poll_timeout_noirq((mdss_dsi_base + 0x02c0),
+				status,
+				((status & BIT(0)) == 1),
+				PLL_POLL_MAX_READS, PLL_POLL_TIMEOUT_US)) {
+		pr_err("%s: DSI PLL status=%x failed to Lock\n",
+				__func__, status);
+		pll_initialized = 0;
+	} else {
+		pll_initialized = 1;
+	}
+	clk_disable_unprepare(mdss_dsi_ahb_clk);
+
+	return pll_initialized;
+}
+
 static long mdss_dsi_pll_byte_round_rate(struct clk *c, unsigned long rate)
 {
 	if (pll_initialized)
@@ -166,7 +201,7 @@
 	}
 }
 
-static int mdss_dsi_pll_byte_set_rate(struct clk *c, unsigned long rate)
+static int __mdss_dsi_pll_byte_set_rate(struct clk *c, unsigned long rate)
 {
 	int pll_divcfg1, pll_divcfg2;
 	int half_bitclk_rate;
@@ -175,14 +210,6 @@
 	if (pll_initialized)
 		return 0;
 
-	if (!mdss_dsi_ahb_clk) {
-		pr_err("%s: mdss_dsi_ahb_clk not initialized\n",
-				__func__);
-		return -EINVAL;
-	}
-
-	clk_enable(mdss_dsi_ahb_clk);
-
 	half_bitclk_rate = rate * 4;
 
 	pll_divcfg1 = (VCO_CLK / half_bitclk_rate) - 2;
@@ -233,13 +260,23 @@
 	pll_byte_clk_rate = 53000000;
 	pll_pclk_rate = 105000000;
 
-	clk_disable(mdss_dsi_ahb_clk);
 	pr_debug("%s: **** PLL initialized success\n", __func__);
 	pll_initialized = 1;
 
 	return 0;
 }
 
+static int mdss_dsi_pll_byte_set_rate(struct clk *c, unsigned long rate)
+{
+	int ret;
+
+	clk_prepare_enable(mdss_dsi_ahb_clk);
+	ret = __mdss_dsi_pll_byte_set_rate(c, rate);
+	clk_disable_unprepare(mdss_dsi_ahb_clk);
+
+	return ret;
+}
+
 static void mdss_dsi_uniphy_pll_lock_detect_setting(void)
 {
 	REG_W(0x04, mdss_dsi_base + 0x0264); /* LKDetect CFG2 */
@@ -264,20 +301,12 @@
 
 	if (!pll_initialized) {
 		if (dsi_pll_rate)
-			mdss_dsi_pll_byte_set_rate(c, dsi_pll_rate);
+			__mdss_dsi_pll_byte_set_rate(c, dsi_pll_rate);
 		else
 			pr_err("%s: Calling clk_en before set_rate\n",
 						__func__);
 	}
 
-	if (!mdss_dsi_ahb_clk) {
-		pr_err("%s: mdss_dsi_ahb_clk not initialized\n",
-				__func__);
-		return -EINVAL;
-	}
-
-	clk_enable(mdss_dsi_ahb_clk);
-
 	mdss_dsi_uniphy_pll_sw_reset();
 	/* PLL power up */
 	/* Add HW recommended delay between
@@ -329,25 +358,17 @@
 	if ((status & 0x01) != 1) {
 		pr_err("%s: DSI PLL status=%x failed to Lock\n",
 		       __func__, status);
-		clk_disable(mdss_dsi_ahb_clk);
 		return -EINVAL;
 	}
 
 	pr_debug("%s: **** PLL Lock success\n", __func__);
-	clk_disable(mdss_dsi_ahb_clk);
 
 	return 0;
 }
 
 static void __mdss_dsi_pll_disable(void)
 {
-	if (!mdss_dsi_ahb_clk)
-		pr_err("%s: mdss_dsi_ahb_clk not initialized\n",
-				__func__);
-
-	clk_enable(mdss_dsi_ahb_clk);
 	writel_relaxed(0x00, mdss_dsi_base + 0x0220); /* GLB CFG */
-	clk_disable(mdss_dsi_ahb_clk);
 	pr_debug("%s: **** disable pll Initialize\n", __func__);
 	pll_initialized = 0;
 }
@@ -386,13 +407,28 @@
 	return ret;
 }
 
-static enum handoff mdss_dsi_pll_handoff(struct clk *c)
+static enum handoff mdss_dsi_pll_byte_handoff(struct clk *c)
 {
-	/*
-	 * FIXME: Continuous display is not implemented. So the display is
-	 * always off. Implement a poor man's handoff by always returning
-	 * "disabled".
-	 */
+	if (mdss_gdsc_enabled() && mdss_dsi_check_pll_lock()) {
+		c->rate = 53000000;
+		dsi_pll_rate = 53000000;
+		pll_byte_clk_rate = 53000000;
+		pll_pclk_rate = 105000000;
+		dsipll_refcount++;
+		return HANDOFF_ENABLED_CLK;
+	}
+
+	return HANDOFF_DISABLED_CLK;
+}
+
+static enum handoff mdss_dsi_pll_pixel_handoff(struct clk *c)
+{
+	if (mdss_gdsc_enabled() && mdss_dsi_check_pll_lock()) {
+		c->rate = 105000000;
+		dsipll_refcount++;
+		return HANDOFF_ENABLED_CLK;
+	}
+
 	return HANDOFF_DISABLED_CLK;
 }
 
@@ -814,7 +850,7 @@
 	.disable = mdss_dsi_pll_disable,
 	.set_rate = mdss_dsi_pll_pixel_set_rate,
 	.round_rate = mdss_dsi_pll_pixel_round_rate,
-	.handoff = mdss_dsi_pll_handoff,
+	.handoff = mdss_dsi_pll_pixel_handoff,
 };
 
 struct clk_ops clk_ops_dsi_byte_pll = {
@@ -822,5 +858,5 @@
 	.disable = mdss_dsi_pll_disable,
 	.set_rate = mdss_dsi_pll_byte_set_rate,
 	.round_rate = mdss_dsi_pll_byte_round_rate,
-	.handoff = mdss_dsi_pll_handoff,
+	.handoff = mdss_dsi_pll_byte_handoff,
 };
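
With the clk_get_sys() lookup removed from mdss_clk_ctrl_pre_init(), the caller is now expected to hand in the DSI AHB iface clock itself. A hypothetical caller sketch, reusing the "mdss_dsi_clk_ctrl"/"iface_clk" lookup strings from the removed code; the function name is illustrative, not the actual 8974 clock-driver wiring:

#include <linux/clk.h>
#include <linux/err.h>
#include <linux/init.h>
#include "clock-mdss-8974.h"

static int __init example_mdss_clk_setup(void)
{
	struct clk *ahb_clk;

	/* look up the DSI interface clock on behalf of the MDSS PLL code */
	ahb_clk = clk_get_sys("mdss_dsi_clk_ctrl", "iface_clk");
	if (IS_ERR(ahb_clk))
		return PTR_ERR(ahb_clk);

	/* maps the GDSC and DSI PHY regions and stores the AHB clock handle */
	mdss_clk_ctrl_pre_init(ahb_clk);
	return 0;
}
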
diff --git a/arch/arm/mach-msm/clock-mdss-8974.h b/arch/arm/mach-msm/clock-mdss-8974.h
index dbae988..e242669 100644
--- a/arch/arm/mach-msm/clock-mdss-8974.h
+++ b/arch/arm/mach-msm/clock-mdss-8974.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -16,7 +16,8 @@
 extern struct clk_ops clk_ops_dsi_byte_pll;
 extern struct clk_ops clk_ops_dsi_pixel_pll;
 
-void mdss_clk_ctrl_init(void);
+void mdss_clk_ctrl_pre_init(struct clk *ahb_clk);
+void mdss_clk_ctrl_post_init(void);
 int hdmi_pll_enable(void);
 void hdmi_pll_disable(void);
 int hdmi_pll_set_rate(unsigned long rate);
diff --git a/arch/arm/mach-msm/devices-8064.c b/arch/arm/mach-msm/devices-8064.c
index 10ee1e3..b7707d7 100644
--- a/arch/arm/mach-msm/devices-8064.c
+++ b/arch/arm/mach-msm/devices-8064.c
@@ -41,6 +41,7 @@
 #include <mach/msm_rtb.h>
 #include <linux/msm_ion.h>
 #include "clock.h"
+#include "pm.h"
 #include "devices.h"
 #include "footswitch.h"
 #include "msm_watchdog.h"
@@ -141,6 +142,19 @@
 	},
 };
 
+static struct msm_pm_sleep_status_data msm_pm_slp_sts_data = {
+	.base_addr = MSM_ACC0_BASE + 0x08,
+	.cpu_offset = MSM_ACC1_BASE - MSM_ACC0_BASE,
+	.mask = 1UL << 13,
+};
+struct platform_device msm8064_cpu_slp_status = {
+	.name		= "cpu_slp_status",
+	.id		= -1,
+	.dev = {
+		.platform_data = &msm_pm_slp_sts_data,
+	},
+};
+
 static struct msm_watchdog_pdata msm_watchdog_pdata = {
 	.pet_time = 10000,
 	.bark_time = 11000,
diff --git a/arch/arm/mach-msm/devices-8930.c b/arch/arm/mach-msm/devices-8930.c
index 6fe8ccb..2f8f547 100644
--- a/arch/arm/mach-msm/devices-8930.c
+++ b/arch/arm/mach-msm/devices-8930.c
@@ -53,6 +53,20 @@
 	.retention_calls_tz = true,
 };
 
+static struct msm_pm_sleep_status_data msm_pm_slp_sts_data = {
+	.base_addr = MSM_ACC0_BASE + 0x08,
+	.cpu_offset = MSM_ACC1_BASE - MSM_ACC0_BASE,
+	.mask = 1UL << 13,
+};
+
+struct platform_device msm8930_cpu_slp_status = {
+	.name		= "cpu_slp_status",
+	.id		= -1,
+	.dev = {
+		.platform_data = &msm_pm_slp_sts_data,
+	},
+};
+
 struct platform_device msm8930_pm_8x60 = {
 	.name		= "pm-8x60",
 	.id		= -1,
diff --git a/arch/arm/mach-msm/devices-8960.c b/arch/arm/mach-msm/devices-8960.c
index 6a344be..2bd9dfe 100644
--- a/arch/arm/mach-msm/devices-8960.c
+++ b/arch/arm/mach-msm/devices-8960.c
@@ -1703,6 +1703,19 @@
 	.id		= -1,
 };
 
+static struct msm_pm_sleep_status_data msm_pm_slp_sts_data = {
+	.base_addr = MSM_ACC0_BASE + 0x08,
+	.cpu_offset = MSM_ACC1_BASE - MSM_ACC0_BASE,
+	.mask = 1UL << 13,
+};
+struct platform_device msm8960_cpu_slp_status = {
+	.name		= "cpu_slp_status",
+	.id		= -1,
+	.dev = {
+		.platform_data = &msm_pm_slp_sts_data,
+	},
+};
+
 static struct msm_watchdog_pdata msm_watchdog_pdata = {
 	.pet_time = 10000,
 	.bark_time = 11000,
diff --git a/arch/arm/mach-msm/devices-msm8x60.c b/arch/arm/mach-msm/devices-msm8x60.c
index cfa9281..f9e7863 100644
--- a/arch/arm/mach-msm/devices-msm8x60.c
+++ b/arch/arm/mach-msm/devices-msm8x60.c
@@ -412,6 +412,7 @@
 	.uart_tx_gpio	= 67,
 	.uart_rx_gpio	= 66,
 	.line		= 1,
+	.set_uart_clk_zero = true,
 };
 
 static struct resource msm_uart_gsbi9_resources[] = {
diff --git a/arch/arm/mach-msm/devices.h b/arch/arm/mach-msm/devices.h
index 53eca3e..327c11d 100644
--- a/arch/arm/mach-msm/devices.h
+++ b/arch/arm/mach-msm/devices.h
@@ -124,6 +124,10 @@
 extern struct platform_device msm_device_hsusb_host2;
 extern struct platform_device msm_device_hsic_host;
 
+extern struct platform_device msm8960_cpu_slp_status;
+extern struct platform_device msm8064_cpu_slp_status;
+extern struct platform_device msm8930_cpu_slp_status;
+
 extern struct platform_device msm_device_otg;
 extern struct platform_device msm_android_usb_device;
 extern struct platform_device msm_android_usb_hsic_device;
diff --git a/arch/arm/mach-msm/gpiomux.c b/arch/arm/mach-msm/gpiomux.c
index 37ff421..4714210 100644
--- a/arch/arm/mach-msm/gpiomux.c
+++ b/arch/arm/mach-msm/gpiomux.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2010, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2010,2013, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -24,13 +24,12 @@
 static struct gpiomux_setting *msm_gpiomux_sets;
 static unsigned msm_gpiomux_ngpio;
 
-int msm_gpiomux_write(unsigned gpio, enum msm_gpiomux_setting which,
+static int msm_gpiomux_store(unsigned gpio, enum msm_gpiomux_setting which,
 	struct gpiomux_setting *setting, struct gpiomux_setting *old_setting)
 {
 	struct msm_gpiomux_rec *rec = msm_gpiomux_recs + gpio;
 	unsigned set_slot = gpio * GPIOMUX_NSETTINGS + which;
 	unsigned long irq_flags;
-	struct gpiomux_setting *new_set;
 	int status = 0;
 
 	if (!msm_gpiomux_recs)
@@ -55,13 +54,31 @@
 		rec->sets[which] = NULL;
 	}
 
+	spin_unlock_irqrestore(&gpiomux_lock, irq_flags);
+	return status;
+}
+
+int msm_gpiomux_write(unsigned gpio, enum msm_gpiomux_setting which,
+	struct gpiomux_setting *setting, struct gpiomux_setting *old_setting)
+{
+	int ret;
+	unsigned long irq_flags;
+	struct gpiomux_setting *new_set;
+	struct msm_gpiomux_rec *rec = msm_gpiomux_recs + gpio;
+
+	ret = msm_gpiomux_store(gpio, which, setting, old_setting);
+	if (ret < 0)
+		return ret;
+
+	spin_lock_irqsave(&gpiomux_lock, irq_flags);
+
 	new_set = rec->ref ? rec->sets[GPIOMUX_ACTIVE] :
 		rec->sets[GPIOMUX_SUSPENDED];
 	if (new_set)
 		__msm_gpiomux_write(gpio, *new_set);
 
 	spin_unlock_irqrestore(&gpiomux_lock, irq_flags);
-	return status;
+	return ret;
 }
 EXPORT_SYMBOL(msm_gpiomux_write);
 
@@ -134,6 +151,22 @@
 }
 EXPORT_SYMBOL(msm_gpiomux_init);
 
+void msm_gpiomux_install_nowrite(struct msm_gpiomux_config *configs,
+				unsigned nconfigs)
+{
+	unsigned c, s;
+	int rc;
+
+	for (c = 0; c < nconfigs; ++c) {
+		for (s = 0; s < GPIOMUX_NSETTINGS; ++s) {
+			rc = msm_gpiomux_store(configs[c].gpio, s,
+				configs[c].settings[s], NULL);
+			if (rc)
+				pr_err("%s: write failure: %d\n", __func__, rc);
+		}
+	}
+}
+
 void msm_gpiomux_install(struct msm_gpiomux_config *configs, unsigned nconfigs)
 {
 	unsigned c, s;
diff --git a/arch/arm/mach-msm/include/mach/ecm_ipa.h b/arch/arm/mach-msm/include/mach/ecm_ipa.h
new file mode 100644
index 0000000..008a659
--- /dev/null
+++ b/arch/arm/mach-msm/include/mach/ecm_ipa.h
@@ -0,0 +1,76 @@
+/* Copyright (c) 2013, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _ECM_IPA_H_
+#define _ECM_IPA_H_
+
+#include <mach/ipa.h>
+
+/*
+ * @priv: private data given upon ipa_connect
+ * @evt: event enum, should be IPA_WRITE_DONE
+ * @data: for tx path the data field is the sent socket buffer.
+ */
+typedef void (*ecm_ipa_callback)(void *priv,
+		enum ipa_dp_evt_type evt,
+		unsigned long data);
+
+
+#ifdef CONFIG_ECM_IPA
+
+int ecm_ipa_init(ecm_ipa_callback * ecm_ipa_rx_dp_notify,
+		ecm_ipa_callback * ecm_ipa_tx_dp_notify,
+		void **priv);
+
+int ecm_ipa_configure(u8 host_ethaddr[], u8 device_ethaddr[],
+		void *priv);
+
+int ecm_ipa_connect(u32 usb_to_ipa_hdl, u32 ipa_to_usb_hdl,
+		void *priv);
+
+int ecm_ipa_disconnect(void *priv);
+
+void ecm_ipa_cleanup(void *priv);
+
+#else /* CONFIG_ECM_IPA*/
+
+static inline int ecm_ipa_init(ecm_ipa_callback *ecm_ipa_rx_dp_notify,
+		ecm_ipa_callback *ecm_ipa_tx_dp_notify,
+		void **priv)
+{
+	return 0;
+}
+
+static inline int ecm_ipa_configure(u8 host_ethaddr[], u8 device_ethaddr[],
+		void *priv)
+{
+	return 0;
+}
+
+static inline int ecm_ipa_connect(u32 usb_to_ipa_hdl, u32 ipa_to_usb_hdl,
+		void *priv)
+{
+	return 0;
+}
+
+static inline int ecm_ipa_disconnect(void *priv)
+{
+	return 0;
+}
+
+static inline void ecm_ipa_cleanup(void *priv)
+{
+
+}
+#endif /* CONFIG_ECM_IPA*/
+
+#endif /* _ECM_IPA_H_ */
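
The ecm_ipa header above only declares the entry points, so a short caller sketch may help; it is hypothetical, assuming the IPA pipes were already set up via ipa_connect(), and every name, handle and MAC value here is a placeholder rather than code from the actual USB ECM driver.

#include <linux/types.h>
#include <mach/ecm_ipa.h>

static ecm_ipa_callback example_rx_notify;	/* filled in by ecm_ipa_init() */
static ecm_ipa_callback example_tx_notify;
static void *example_ecm_priv;

static int example_ecm_bring_up(u32 usb_to_ipa_hdl, u32 ipa_to_usb_hdl)
{
	/* placeholder locally administered MAC addresses */
	u8 host_mac[6]   = {0x02, 0x00, 0x00, 0x00, 0x00, 0x01};
	u8 device_mac[6] = {0x02, 0x00, 0x00, 0x00, 0x00, 0x02};
	int ret;

	/* retrieve the data-path callbacks and the ecm_ipa handle */
	ret = ecm_ipa_init(&example_rx_notify, &example_tx_notify,
			&example_ecm_priv);
	if (ret)
		return ret;

	ret = ecm_ipa_configure(host_mac, device_mac, example_ecm_priv);
	if (ret)
		goto fail;

	/* pipe handles are assumed to come from earlier ipa_connect() calls */
	ret = ecm_ipa_connect(usb_to_ipa_hdl, ipa_to_usb_hdl, example_ecm_priv);
	if (ret)
		goto fail;

	return 0;

fail:
	ecm_ipa_cleanup(example_ecm_priv);
	return ret;
}
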
diff --git a/arch/arm/mach-msm/include/mach/gpiomux.h b/arch/arm/mach-msm/include/mach/gpiomux.h
index 85bbbd1..5ffcabb 100644
--- a/arch/arm/mach-msm/include/mach/gpiomux.h
+++ b/arch/arm/mach-msm/include/mach/gpiomux.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2010-2011, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2010-2011,2013, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -127,6 +127,12 @@
  */
 void msm_gpiomux_install(struct msm_gpiomux_config *configs, unsigned nconfigs);
 
+/* Install a block of gpiomux configurations in gpiomux. Do not, however,
+ * write to hardware; just store the settings to be retrieved at a later time.
+ */
+void msm_gpiomux_install_nowrite(struct msm_gpiomux_config *configs,
+				unsigned nconfigs);
+
 /* Increment a gpio's reference count, possibly activating the line. */
 int __must_check msm_gpiomux_get(unsigned gpio);
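
A minimal board-file sketch of the new no-write install path declared above; the GPIO number and the gpiomux_setting values are placeholders, and the func/drv/pull field and enum names are the usual gpiomux ones rather than anything mandated by this patch.

#include <linux/init.h>
#include <linux/kernel.h>
#include <mach/gpiomux.h>

static struct gpiomux_setting example_suspend_cfg = {
	.func = GPIOMUX_FUNC_GPIO,
	.drv  = GPIOMUX_DRV_2MA,
	.pull = GPIOMUX_PULL_NONE,
};

static struct msm_gpiomux_config example_cfgs[] __initdata = {
	{
		.gpio = 34,	/* placeholder GPIO number */
		.settings = {
			[GPIOMUX_SUSPENDED] = &example_suspend_cfg,
		},
	},
};

static void __init example_board_gpiomux(void)
{
	/* record the settings only; pad registers are left untouched */
	msm_gpiomux_install_nowrite(example_cfgs, ARRAY_SIZE(example_cfgs));
}
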
 
diff --git a/arch/arm/mach-msm/include/mach/iommu_hw-v0.h b/arch/arm/mach-msm/include/mach/iommu_hw-v0.h
index 198f72f..68dec79 100644
--- a/arch/arm/mach-msm/include/mach/iommu_hw-v0.h
+++ b/arch/arm/mach-msm/include/mach/iommu_hw-v0.h
@@ -830,6 +830,11 @@
 #define IDR		(0xFF8)
 #define RPU_ACR		(0xFFC)
 
+/* Event Monitor (EM) Registers */
+#define EMMC		(0xE000)
+#define EMCS		(0xE004)
+#define EMCC_N		(0xE100)
+#define EMC_N		(0xE200)
 
 /* Context Bank Registers */
 #define SCTLR		(0x000)
diff --git a/arch/arm/mach-msm/include/mach/iommu_hw-v1.h b/arch/arm/mach-msm/include/mach/iommu_hw-v1.h
index 4f08187..554f7e0 100644
--- a/arch/arm/mach-msm/include/mach/iommu_hw-v1.h
+++ b/arch/arm/mach-msm/include/mach/iommu_hw-v1.h
@@ -15,8 +15,6 @@
 
 #define CTX_SHIFT  12
 #define CTX_OFFSET 0x8000
-#define IMPLDEF_OFFSET	0x2000
-#define IMPLDEF_LENGTH	0xDFF
 
 #define GET_GLOBAL_REG(reg, base) (readl_relaxed((base) + (reg)))
 #define GET_CTX_REG(reg, base, ctx) \
diff --git a/arch/arm/mach-msm/include/mach/iommu_perfmon.h b/arch/arm/mach-msm/include/mach/iommu_perfmon.h
index 59f58c1..c03c752 100644
--- a/arch/arm/mach-msm/include/mach/iommu_perfmon.h
+++ b/arch/arm/mach-msm/include/mach/iommu_perfmon.h
@@ -12,6 +12,7 @@
 #include <linux/err.h>
 #include <linux/mutex.h>
 #include <linux/list.h>
+#include <linux/irqreturn.h>
 
 #ifndef MSM_IOMMU_PERFMON_H
 #define MSM_IOMMU_PERFMON_H
@@ -75,6 +76,7 @@
  * @evt_irq:    irq number for event overflow interrupt
  * @iommu_dev:  pointer to iommu device
  * @ops:        iommu access operations pointer.
+ * @hw_ops:     iommu pm hw access operations pointer.
  */
 struct iommu_info {
 	const char *iommu_name;
@@ -82,6 +84,7 @@
 	int evt_irq;
 	struct device *iommu_dev;
 	struct iommu_access_ops *ops;
+	struct iommu_pm_hw_ops *hw_ops;
 };
 
 /**
@@ -112,9 +115,63 @@
 	struct mutex lock;
 };
 
-extern struct iommu_access_ops iommu_access_ops;
+/**
+ * struct iommu_pm_hw_ops - Callbacks for accessing IOMMU HW
+ * @initialize_hw: Call to do any initialization before enabling ovf interrupts
+ * @is_hw_access_OK: Returns 1 if we can access HW, 0 otherwise
+ * @grp_enable: Call to enable a counter group
+ * @grp_disable: Call to disable a counter group
+ * @enable_pm: Call to enable PM
+ * @disable_pm: Call to disable PM
+ * @reset_counters:  Call to reset counters
+ * @check_for_overflow:  Call to check for overflow
+ * @evt_ovfl_int_handler: Overflow interrupt handler callback
+ * @counter_enable: Call to enable counters
+ * @counter_disable: Call to disable counters
+ * @ovfl_int_enable: Call to enable overflow interrupts
+ * @ovfl_int_disable: Call to disable overflow interrupts
+ * @set_event_class: Call to set event class
+ * @read_counter: Call to read a counter value
+ */
+struct iommu_pm_hw_ops {
+	void (*initialize_hw)(const struct iommu_pmon *);
+	unsigned int (*is_hw_access_OK)(const struct iommu_pmon *);
+	void (*grp_enable)(struct iommu_info *, unsigned int);
+	void (*grp_disable)(struct iommu_info *, unsigned int);
+	void (*enable_pm)(struct iommu_info *);
+	void (*disable_pm)(struct iommu_info *);
+	void (*reset_counters)(const struct iommu_info *);
+	void (*check_for_overflow)(struct iommu_pmon *);
+	irqreturn_t (*evt_ovfl_int_handler)(int, void *);
+	void (*counter_enable)(struct iommu_info *,
+			       struct iommu_pmon_counter *);
+	void (*counter_disable)(struct iommu_info *,
+			       struct iommu_pmon_counter *);
+	void (*ovfl_int_enable)(struct iommu_info *,
+				const struct iommu_pmon_counter *);
+	void (*ovfl_int_disable)(struct iommu_info *,
+				const struct iommu_pmon_counter *);
+	void (*set_event_class)(struct iommu_pmon *pmon, unsigned int,
+				unsigned int);
+	unsigned int (*read_counter)(struct iommu_pmon_counter *);
+};
+
+extern struct iommu_access_ops iommu_access_ops_v0;
+extern struct iommu_access_ops iommu_access_ops_v1;
+#define MSM_IOMMU_PMU_NO_EVENT_CLASS -1
 
 #ifdef CONFIG_MSM_IOMMU_PMON
+
+/**
+ * Get pointer to PMU hardware access functions for IOMMUv0 PMU
+ */
+struct iommu_pm_hw_ops *iommu_pm_get_hw_ops_v0(void);
+
+/**
+ * Get pointer to PMU hardware access functions for IOMMUv1 PMU
+ */
+struct iommu_pm_hw_ops *iommu_pm_get_hw_ops_v1(void);
+
 /**
  * Allocate memory for performance monitor structure. Must
  * be called before iommu_pm_iommu_register
@@ -150,6 +207,16 @@
   */
 void msm_iommu_detached(struct device *dev);
 #else
+static inline struct iommu_pm_hw_ops *iommu_pm_get_hw_ops_v0(void)
+{
+	return NULL;
+}
+
+static inline struct iommu_pm_hw_ops *iommu_pm_get_hw_ops_v1(void)
+{
+	return NULL;
+}
+
 static inline struct iommu_pmon *msm_iommu_pm_alloc(struct device *iommu_dev)
 {
 	return NULL;
diff --git a/arch/arm/mach-msm/include/mach/ipa.h b/arch/arm/mach-msm/include/mach/ipa.h
index 26a055d..f2a4427 100644
--- a/arch/arm/mach-msm/include/mach/ipa.h
+++ b/arch/arm/mach-msm/include/mach/ipa.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -354,6 +354,136 @@
 	IPA_BRIDGE_TYPE_MAX
 };
 
+/**
+ * enum ipa_rm_resource_name - IPA RM clients identification names
+ *
+ * Add new mapping to ipa_rm_dep_prod_index() / ipa_rm_dep_cons_index()
+ * when adding new entry to this enum.
+ */
+enum ipa_rm_resource_name {
+	IPA_RM_RESOURCE_PROD = 0,
+	IPA_RM_RESOURCE_BRIDGE_PROD = IPA_RM_RESOURCE_PROD,
+	IPA_RM_RESOURCE_A2_PROD,
+	IPA_RM_RESOURCE_USB_PROD,
+	IPA_RM_RESOURCE_HSIC_PROD,
+	IPA_RM_RESOURCE_STD_ECM_PROD,
+	IPA_RM_RESOURCE_WWAN_0_PROD,
+	IPA_RM_RESOURCE_WWAN_1_PROD,
+	IPA_RM_RESOURCE_WWAN_2_PROD,
+	IPA_RM_RESOURCE_WWAN_3_PROD,
+	IPA_RM_RESOURCE_WWAN_4_PROD,
+	IPA_RM_RESOURCE_WWAN_5_PROD,
+	IPA_RM_RESOURCE_WWAN_6_PROD,
+	IPA_RM_RESOURCE_WWAN_7_PROD,
+	IPA_RM_RESOURCE_WLAN_PROD,
+	IPA_RM_RESOURCE_PROD_MAX,
+
+	IPA_RM_RESOURCE_A2_CONS = IPA_RM_RESOURCE_PROD_MAX,
+	IPA_RM_RESOURCE_USB_CONS,
+	IPA_RM_RESOURCE_HSIC_CONS,
+	IPA_RM_RESOURCE_MAX
+};
+
+/**
+ * enum ipa_rm_event - IPA RM events
+ *
+ * Indicate the resource state change
+ */
+enum ipa_rm_event {
+	IPA_RM_RESOURCE_GRANTED,
+	IPA_RM_RESOURCE_RELEASED
+};
+
+typedef void (*ipa_rm_notify_cb)(void *user_data,
+		enum ipa_rm_event event,
+		unsigned long data);
+/**
+ * struct ipa_rm_register_params - information needed to
+ *      register IPA RM client with IPA RM
+ *
+ * @user_data: IPA RM client provided information
+ *		to be passed to notify_cb callback below
+ * @notify_cb: callback which is called by the resource
+ *		to notify the IPA RM client about its state
+ *		change. The IPA RM client is expected to perform
+ *		non-blocking operations only in notify_cb and to
+ *		release the notification context as soon as
+ *		possible.
+ */
+struct ipa_rm_register_params {
+	void *user_data;
+	ipa_rm_notify_cb notify_cb;
+};
+
+/**
+ * struct ipa_rm_create_params - information needed to initialize
+ *				the resource
+ * @name: resource name
+ * @reg_params: register parameters; contents are ignored
+ *		for a consumer resource, for which NULL should
+ *		be provided
+ * @request_resource: function which should be called to request resource,
+ *			NULL should be provided for producer resource
+ * @release_resource: function which should be called to release resource,
+ *			NULL should be provided for producer resource
+ *
+ * The IPA RM client is expected to perform non-blocking operations only
+ * in the request_resource and release_resource functions and to
+ * release the notification context as soon as possible.
+ */
+struct ipa_rm_create_params {
+	enum ipa_rm_resource_name name;
+	struct ipa_rm_register_params reg_params;
+	int (*request_resource)(void);
+	int (*release_resource)(void);
+};
+
+#define A2_MUX_HDR_NAME_V4_PREF "dmux_hdr_v4_"
+#define A2_MUX_HDR_NAME_V6_PREF "dmux_hdr_v6_"
+
+enum a2_mux_event_type {
+	A2_MUX_RECEIVE,
+	A2_MUX_WRITE_DONE
+};
+
+enum a2_mux_logical_channel_id {
+	A2_MUX_WWAN_0,
+	A2_MUX_WWAN_1,
+	A2_MUX_WWAN_2,
+	A2_MUX_WWAN_3,
+	A2_MUX_WWAN_4,
+	A2_MUX_WWAN_5,
+	A2_MUX_WWAN_6,
+	A2_MUX_WWAN_7,
+	A2_MUX_TETHERED_0,
+	A2_MUX_NUM_CHANNELS
+};
+
+typedef void (*a2_mux_notify_cb)(void *user_data,
+		enum a2_mux_event_type event,
+		unsigned long data);
+
+/**
+ * enum teth_tethering_mode - Tethering mode (Rmnet / MBIM)
+ */
+enum teth_tethering_mode {
+	TETH_TETHERING_MODE_RMNET,
+	TETH_TETHERING_MODE_MBIM,
+	TETH_TETHERING_MODE_MAX,
+};
+
+/**
+ * struct teth_bridge_connect_params - Parameters used in teth_bridge_connect()
+ * @ipa_usb_pipe_hdl:	IPA to USB pipe handle, returned from ipa_connect()
+ * @usb_ipa_pipe_hdl:	USB to IPA pipe handle, returned from ipa_connect()
+ * @tethering_mode:	Rmnet or MBIM
+ */
+struct teth_bridge_connect_params {
+	u32 ipa_usb_pipe_hdl;
+	u32 usb_ipa_pipe_hdl;
+	enum teth_tethering_mode tethering_mode;
+};
+
 #ifdef CONFIG_IPA
 
 /*
@@ -489,8 +619,105 @@
 
 int ipa_teardown_sys_pipe(u32 clnt_hdl);
 
+/*
+ * Resource manager
+ */
+int ipa_rm_create_resource(struct ipa_rm_create_params *create_params);
+
+int ipa_rm_register(enum ipa_rm_resource_name resource_name,
+			struct ipa_rm_register_params *reg_params);
+
+int ipa_rm_deregister(enum ipa_rm_resource_name resource_name,
+			struct ipa_rm_register_params *reg_params);
+
+int ipa_rm_add_dependency(enum ipa_rm_resource_name resource_name,
+			enum ipa_rm_resource_name depends_on_name);
+
+int ipa_rm_delete_dependency(enum ipa_rm_resource_name resource_name,
+			enum ipa_rm_resource_name depends_on_name);
+
+int ipa_rm_request_resource(enum ipa_rm_resource_name resource_name);
+
+int ipa_rm_release_resource(enum ipa_rm_resource_name resource_name);
+
+int ipa_rm_notify_completion(enum ipa_rm_event event,
+		enum ipa_rm_resource_name resource_name);
+
+int ipa_rm_inactivity_timer_init(enum ipa_rm_resource_name resource_name,
+				 unsigned long msecs);
+
+int ipa_rm_inactivity_timer_destroy(enum ipa_rm_resource_name resource_name);
+
+int ipa_rm_inactivity_timer_request_resource(
+				enum ipa_rm_resource_name resource_name);
+
+int ipa_rm_inactivity_timer_release_resource(
+				enum ipa_rm_resource_name resource_name);
+
+/*
+ * a2 service
+ */
+int a2_mux_open_channel(enum a2_mux_logical_channel_id lcid,
+			void *user_data,
+			a2_mux_notify_cb notify_cb);
+
+int a2_mux_close_channel(enum a2_mux_logical_channel_id lcid);
+
+int a2_mux_write(enum a2_mux_logical_channel_id lcid, struct sk_buff *skb);
+
+int a2_mux_is_ch_low(enum a2_mux_logical_channel_id lcid);
+
+int a2_mux_is_ch_full(enum a2_mux_logical_channel_id lcid);
+
+int a2_mux_get_tethered_client_handles(enum a2_mux_logical_channel_id lcid,
+		unsigned int *clnt_cons_handle,
+		unsigned int *clnt_prod_handle);
+
+/*
+ * Tethering bridge (Rmnet / MBIM)
+ */
+int teth_bridge_init(ipa_notify_cb *usb_notify_cb_ptr, void **private_data_ptr);
+
+int teth_bridge_disconnect(void);
+
+int teth_bridge_connect(struct teth_bridge_connect_params *connect_params);
+
 #else /* CONFIG_IPA */
 
+static inline int a2_mux_open_channel(enum a2_mux_logical_channel_id lcid,
+	void *user_data, a2_mux_notify_cb notify_cb)
+{
+	return -EPERM;
+}
+
+static inline int a2_mux_close_channel(enum a2_mux_logical_channel_id lcid)
+{
+	return -EPERM;
+}
+
+static inline int a2_mux_write(enum a2_mux_logical_channel_id lcid,
+			       struct sk_buff *skb)
+{
+	return -EPERM;
+}
+
+static inline int a2_mux_is_ch_low(enum a2_mux_logical_channel_id lcid)
+{
+	return -EPERM;
+}
+
+static inline int a2_mux_is_ch_full(enum a2_mux_logical_channel_id lcid)
+{
+	return -EPERM;
+}
+
+static inline int a2_mux_get_tethered_client_handles(
+	enum a2_mux_logical_channel_id lcid, unsigned int *clnt_cons_handle,
+	unsigned int *clnt_prod_handle)
+{
+	return -EPERM;
+}
+
 /*
  * Connect / Disconnect
  */
@@ -778,6 +1005,104 @@
 	return -EPERM;
 }
 
+/*
+ * Resource manager
+ */
+static inline int ipa_rm_create_resource(
+		struct ipa_rm_create_params *create_params)
+{
+	return -EPERM;
+}
+
+static inline int ipa_rm_register(enum ipa_rm_resource_name resource_name,
+			struct ipa_rm_register_params *reg_params)
+{
+	return -EPERM;
+}
+
+static inline int ipa_rm_deregister(enum ipa_rm_resource_name resource_name,
+			struct ipa_rm_register_params *reg_params)
+{
+	return -EPERM;
+}
+
+static inline int ipa_rm_add_dependency(
+		enum ipa_rm_resource_name resource_name,
+		enum ipa_rm_resource_name depends_on_name)
+{
+	return -EPERM;
+}
+
+static inline int ipa_rm_delete_dependency(
+		enum ipa_rm_resource_name resource_name,
+		enum ipa_rm_resource_name depends_on_name)
+{
+	return -EPERM;
+}
+
+static inline int ipa_rm_request_resource(
+		enum ipa_rm_resource_name resource_name)
+{
+	return -EPERM;
+}
+
+static inline int ipa_rm_release_resource(
+		enum ipa_rm_resource_name resource_name)
+{
+	return -EPERM;
+}
+
+static inline int ipa_rm_notify_completion(enum ipa_rm_event event,
+		enum ipa_rm_resource_name resource_name)
+{
+	return -EPERM;
+}
+
+static inline int ipa_rm_inactivity_timer_init(
+		enum ipa_rm_resource_name resource_name,
+			unsigned long msecs)
+{
+	return -EPERM;
+}
+
+static inline int ipa_rm_inactivity_timer_destroy(
+		enum ipa_rm_resource_name resource_name)
+{
+	return -EPERM;
+}
+
+static inline int ipa_rm_inactivity_timer_request_resource(
+				enum ipa_rm_resource_name resource_name)
+{
+	return -EPERM;
+}
+
+static inline int ipa_rm_inactivity_timer_release_resource(
+				enum ipa_rm_resource_name resource_name)
+{
+	return -EPERM;
+}
+
+/*
+ * Tethering bridge (Rmnet / MBIM)
+ */
+static inline int teth_bridge_init(ipa_notify_cb *usb_notify_cb_ptr,
+				   void **private_data_ptr)
+{
+	return -EPERM;
+}
+
+static inline int teth_bridge_disconnect(void)
+{
+	return -EPERM;
+}
+
+static inline int teth_bridge_connect(struct teth_bridge_connect_params
+				      *connect_params)
+{
+	return -EPERM;
+}
+
 #endif /* CONFIG_IPA*/
 
 #endif /* _IPA_H_ */
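
Since the resource-manager API above is declaration-only at this point, here is a hypothetical producer-side sketch of how a client might use it; the choice of USB producer depending on the A2 consumer is illustrative and none of this is lifted from an existing driver.

#include <linux/kernel.h>
#include <mach/ipa.h>

static void example_usb_rm_notify(void *user_data, enum ipa_rm_event event,
		unsigned long data)
{
	/* must not block; just note the state change */
	if (event == IPA_RM_RESOURCE_GRANTED)
		pr_debug("example: USB_PROD granted\n");
	else
		pr_debug("example: USB_PROD released\n");
}

static int example_usb_rm_setup(void)
{
	struct ipa_rm_create_params create_params = {
		.name = IPA_RM_RESOURCE_USB_PROD,
		.reg_params = {
			.user_data = NULL,
			.notify_cb = example_usb_rm_notify,
		},
		/* request/release callbacks stay NULL for a producer */
	};
	int ret;

	ret = ipa_rm_create_resource(&create_params);
	if (ret)
		return ret;

	/* data can only flow once the A2 consumer is up as well */
	ret = ipa_rm_add_dependency(IPA_RM_RESOURCE_USB_PROD,
			IPA_RM_RESOURCE_A2_CONS);
	if (ret)
		return ret;

	/* the grant may be signalled later through example_usb_rm_notify() */
	return ipa_rm_request_resource(IPA_RM_RESOURCE_USB_PROD);
}
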
diff --git a/arch/arm/mach-msm/include/mach/msm_iomap.h b/arch/arm/mach-msm/include/mach/msm_iomap.h
index 4f475fe..ebb096e 100644
--- a/arch/arm/mach-msm/include/mach/msm_iomap.h
+++ b/arch/arm/mach-msm/include/mach/msm_iomap.h
@@ -81,7 +81,7 @@
 #define MSM_LPASS_CLK_CTL_BASE	IOMEM(0xFA015000)	/*  4K	*/
 #define MSM_HFPLL_BASE		IOMEM(0xFA016000)	/*  4K	*/
 #define MSM_TLMM_BASE		IOMEM(0xFA017000)	/* 16K	*/
-#define MSM_SHARED_RAM_BASE	IOMEM(0xFA300000)	/*  2M  */
+#define MSM_SHARED_RAM_BASE	IOMEM(0xFA400000)	/*  2M  */
 #define MSM_SIC_NON_SECURE_BASE	IOMEM(0xFA600000)	/* 64K	*/
 #define MSM_HDMI_BASE		IOMEM(0xFA800000)	/*  4K  */
 #define MSM_RPM_BASE		IOMEM(0xFA801000)	/*  4K	*/
diff --git a/arch/arm/mach-msm/include/mach/msm_ipc_logging.h b/arch/arm/mach-msm/include/mach/msm_ipc_logging.h
index ec9fdb0..b675c00 100644
--- a/arch/arm/mach-msm/include/mach/msm_ipc_logging.h
+++ b/arch/arm/mach-msm/include/mach/msm_ipc_logging.h
@@ -113,7 +113,7 @@
  * @ilctxt: Debug Log Context created using ipc_log_context_create()
  * @fmt:    Data specified using format specifiers
  */
-int ipc_log_string(void *ilctxt, const char *fmt, ...);
+int ipc_log_string(void *ilctxt, const char *fmt, ...) __printf(2, 3);
 
 /*
  * Print a string to decode context.
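
With the __printf(2, 3) annotation added above, the compiler now checks ipc_log_string() format strings against their arguments. A minimal usage sketch, with a placeholder page count and context name:

#include <linux/kernel.h>
#include <mach/msm_ipc_logging.h>

static void *example_log_ctxt;

static void example_log_init(void)
{
	/* two pages of log buffer under the name "example" (both placeholders) */
	example_log_ctxt = ipc_log_context_create(2, "example");
	if (!example_log_ctxt)
		pr_err("example: failed to create IPC log context\n");
}

static void example_log_rx(int port_id, int bytes)
{
	if (example_log_ctxt)
		ipc_log_string(example_log_ctxt, "port %d rx %d bytes",
				port_id, bytes);
}
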
diff --git a/arch/arm/mach-msm/include/mach/msm_serial_hs_lite.h b/arch/arm/mach-msm/include/mach/msm_serial_hs_lite.h
index 7bdd35a..7a24190 100644
--- a/arch/arm/mach-msm/include/mach/msm_serial_hs_lite.h
+++ b/arch/arm/mach-msm/include/mach/msm_serial_hs_lite.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2011-2012 The Linux Foundation. All rights reserved.
+/* Copyright (c) 2011-2013 The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -23,6 +23,9 @@
  * @uart_rx_gpio: GPIO number for UART Rx Line.
  * @uart_cts_gpio: GPIO number for UART CTS Line.
  * @uart_rfr_gpio: GPIO number for UART RFR Line.
+ * @set_uart_clk_zero: use this if setting the UART clock to zero is required.
+ * It is mainly required where the same UART is used across different processors.
+ * Make sure the platform's clock driver supports setting the clock rate to zero.
  * @use_pm: use this to enable power management
  * @line: Used to set UART Port number.
  */
@@ -32,6 +35,7 @@
 	unsigned uart_rx_gpio;
 	unsigned uart_cts_gpio;
 	unsigned uart_rfr_gpio;
+	bool set_uart_clk_zero;
 	bool use_pm;
 	int line;
 };
diff --git a/arch/arm/mach-msm/include/mach/qpnp-int.h b/arch/arm/mach-msm/include/mach/qpnp-int.h
index 8818bf2..2b86216 100644
--- a/arch/arm/mach-msm/include/mach/qpnp-int.h
+++ b/arch/arm/mach-msm/include/mach/qpnp-int.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -57,6 +57,14 @@
 				struct qpnp_local_int *li_cb);
 
 /**
+ * qpnpint_unregister_controller() - Unregister local interrupt callbacks
+ *
+ * Used by the PMIC Arbiter driver or equivalent to unregister
+ * callbacks for interrupt events.
+ */
+int qpnpint_unregister_controller(struct device_node *node);
+
+/**
  * qpnpint_handle_irq - Main interrupt handling routine
  *
  * Pass a PMIC Arbiter interrupt to Linux.
@@ -78,6 +86,12 @@
 	return -ENXIO;
 }
 
+static inline int qpnpint_unregister_controller(struct device_node *node)
+{
+	return -ENXIO;
+}
+
 static inline int qpnpint_handle_irq(struct spmi_controller *spmi_ctrl,
 		       struct qpnp_irq_spec *spec)
 {
diff --git a/arch/arm/mach-msm/iommu_domains.c b/arch/arm/mach-msm/iommu_domains.c
index 02272bc..eb44c40 100644
--- a/arch/arm/mach-msm/iommu_domains.c
+++ b/arch/arm/mach-msm/iommu_domains.c
@@ -19,15 +19,15 @@
 #include <linux/rbtree.h>
 #include <linux/slab.h>
 #include <linux/vmalloc.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_device.h>
 #include <asm/sizes.h>
 #include <asm/page.h>
 #include <mach/iommu.h>
 #include <mach/iommu_domains.h>
 #include <mach/socinfo.h>
 #include <mach/msm_subsystem_map.h>
-#include <linux/of.h>
-#include <linux/of_address.h>
-#include <linux/of_device.h>
 
 struct msm_iova_data {
 	struct rb_node node;
diff --git a/arch/arm/mach-msm/ipc_router.c b/arch/arm/mach-msm/ipc_router.c
index 265435a..ea874bd 100644
--- a/arch/arm/mach-msm/ipc_router.c
+++ b/arch/arm/mach-msm/ipc_router.c
@@ -558,9 +558,9 @@
 	mutex_init(&port_ptr->port_rx_q_lock);
 	init_waitqueue_head(&port_ptr->port_rx_wait_q);
 	snprintf(port_ptr->rx_wakelock_name, MAX_WAKELOCK_NAME_SZ,
-		 "msm_ipc_read%08x:%08x",
-		 port_ptr->this_port.node_id,
-		 port_ptr->this_port.port_id);
+		 "ipc%08x_%s",
+		 port_ptr->this_port.port_id,
+		 current->comm);
 	wake_lock_init(&port_ptr->port_rx_wake_lock,
 			WAKE_LOCK_SUSPEND, port_ptr->rx_wakelock_name);
 
diff --git a/arch/arm/mach-msm/ipc_socket.c b/arch/arm/mach-msm/ipc_socket.c
index d31af84..a08e7de 100644
--- a/arch/arm/mach-msm/ipc_socket.c
+++ b/arch/arm/mach-msm/ipc_socket.c
@@ -20,6 +20,9 @@
 #include <linux/fcntl.h>
 #include <linux/gfp.h>
 #include <linux/msm_ipc.h>
+#include <linux/sched.h>
+#include <linux/thread_info.h>
+#include <linux/qmi_encdec.h>
 
 #include <asm/string.h>
 #include <asm/atomic.h>
@@ -27,16 +30,93 @@
 #include <net/sock.h>
 
 #include <mach/msm_ipc_router.h>
+#include <mach/msm_ipc_logging.h>
 
 #include "ipc_router.h"
 #include "msm_ipc_router_security.h"
 
 #define msm_ipc_sk(sk) ((struct msm_ipc_sock *)(sk))
 #define msm_ipc_sk_port(sk) ((struct msm_ipc_port *)(msm_ipc_sk(sk)->port))
+#define REQ_RESP_IPC_LOG_PAGES 5
+#define IND_IPC_LOG_PAGES 5
+#define IPC_SEND 1
+#define IPC_RECV 2
+#define IPC_REQ_RESP_LOG(level, buf...) \
+do { \
+	if (ipc_req_resp_log_txt) { \
+		ipc_log_string(ipc_req_resp_log_txt, buf); \
+	} \
+} while (0)
+
+#define IPC_IND_LOG(level, buf...) \
+do { \
+	if (ipc_ind_log_txt) { \
+		ipc_log_string(ipc_ind_log_txt, buf); \
+	} \
+} while (0)
 
 static int sockets_enabled;
 static struct proto msm_ipc_proto;
 static const struct proto_ops msm_ipc_proto_ops;
+static void *ipc_req_resp_log_txt;
+static void *ipc_ind_log_txt;
+
+/**
+ * msm_ipc_router_ipc_log() - Pass log data to IPC logging framework
+ * @tran:	Identifies whether the data is being received or sent.
+ * @ipc_buf:	Buffer to extract the log data.
+ * @port_ptr:	IPC Router port corresponding to the current log data.
+ *
+ * This function builds the data that would be passed on to the IPC logging
+ * framework. The data that would be passed corresponds to the information
+ * that is exchanged between the IPC Router and user space modules during
+ * request/response/indication transactions.
+ */
+
+static void msm_ipc_router_ipc_log(uint8_t tran,
+			struct sk_buff *ipc_buf, struct msm_ipc_port *port_ptr)
+{
+	struct qmi_header *hdr = (struct qmi_header *)ipc_buf->data;
+
+	/*
+	 * IPC Logging format is as below:-
+	 * <Name>(Name of the User Space Process):
+	 * <PID> (PID of the user space process) :
+	 * <TID> (TID of the user space thread)  :
+	 * <User Space Module>(CLNT or  SERV)    :
+	 * <Operation Type> (Transmit/Receive)	 :
+	 * <Control Flag> (Req/Resp/Ind)	 :
+	 * <Transaction ID>			 :
+	 * <Message ID>				 :
+	 * <Message Length>			 :
+	 */
+	if (ipc_req_resp_log_txt &&
+		(((uint8_t) hdr->cntl_flag == QMI_REQUEST_CONTROL_FLAG) ||
+		((uint8_t) hdr->cntl_flag == QMI_RESPONSE_CONTROL_FLAG)) &&
+		(port_ptr->type == CLIENT_PORT ||
+					port_ptr->type == SERVER_PORT)) {
+		IPC_REQ_RESP_LOG(KERN_DEBUG,
+			"%s %d %d %s %s CF:%x TI:%x MI:%x ML:%x",
+			current->comm, current->tgid, current->pid,
+			(port_ptr->type == CLIENT_PORT ? "QCCI" : "QCSI"),
+			(tran == IPC_RECV ? "RX" :
+			(tran == IPC_SEND ? "TX" : "ERR")),
+			(uint8_t)hdr->cntl_flag, hdr->txn_id, hdr->msg_id,
+			hdr->msg_len);
+	} else if (ipc_ind_log_txt &&
+		((uint8_t)hdr->cntl_flag == QMI_INDICATION_CONTROL_FLAG) &&
+		(port_ptr->type == CLIENT_PORT ||
+					port_ptr->type == SERVER_PORT)) {
+		IPC_IND_LOG(KERN_DEBUG,
+			"%s %d %d %s %s CF:%x TI:%x MI:%x ML:%x",
+			current->comm, current->tgid, current->pid,
+			(port_ptr->type == CLIENT_PORT ? "QCCI" : "QCSI"),
+			(tran == IPC_RECV ? "RX" :
+			(tran == IPC_SEND ? "TX" : "ERR")),
+			(uint8_t)hdr->cntl_flag, hdr->txn_id, hdr->msg_id,
+			hdr->msg_len);
+	}
+}
 
 static struct sk_buff_head *msm_ipc_router_build_msg(unsigned int num_sect,
 					  struct iovec const *msg_sect,
@@ -263,6 +343,7 @@
 	struct msm_ipc_port *port_ptr = msm_ipc_sk_port(sk);
 	struct sockaddr_msm_ipc *dest = (struct sockaddr_msm_ipc *)m->msg_name;
 	struct sk_buff_head *msg;
+	struct sk_buff *ipc_buf;
 	int ret;
 
 	if (!dest)
@@ -284,7 +365,8 @@
 
 	if (port_ptr->type == CLIENT_PORT)
 		wait_for_irsc_completion();
-
+	ipc_buf = skb_peek(msg);
+	msm_ipc_router_ipc_log(IPC_SEND, ipc_buf, port_ptr);
 	ret = msm_ipc_router_send_to(port_ptr, msg, &dest->address);
 	if (ret == (IPC_ROUTER_HDR_SIZE + total_len))
 		ret = total_len;
@@ -300,6 +382,7 @@
 	struct sock *sk = sock->sk;
 	struct msm_ipc_port *port_ptr = msm_ipc_sk_port(sk);
 	struct sk_buff_head *msg;
+	struct sk_buff *ipc_buf;
 	long timeout;
 	int ret;
 
@@ -344,6 +427,8 @@
 	}
 
 	ret = msm_ipc_router_extract_msg(m, msg);
+	ipc_buf = skb_peek(msg);
+	msm_ipc_router_ipc_log(IPC_RECV, ipc_buf, port_ptr);
 	msm_ipc_router_release_msg(msg);
 	msg = NULL;
 	release_sock(sk);
@@ -400,6 +485,14 @@
 		if (server_arg.num_entries_in_array) {
 			srv_info_sz = server_arg.num_entries_in_array *
 					sizeof(*srv_info);
+			if ((srv_info_sz / sizeof(*srv_info)) !=
+			    server_arg.num_entries_in_array) {
+				pr_err("%s: Integer Overflow %zu * %d\n",
+					__func__, sizeof(*srv_info),
+					server_arg.num_entries_in_array);
+				ret = -EINVAL;
+				break;
+			}
 			srv_info = kmalloc(srv_info_sz, GFP_KERNEL);
 			if (!srv_info) {
 				ret = -ENOMEM;
@@ -510,6 +603,29 @@
 	.obj_size       = sizeof(struct msm_ipc_sock),
 };
 
+/**
+ * msm_ipc_router_ipc_log_init() - Init function for IPC Logging
+ *
+ * Initialize the buffers to be used to provide the log information
+ * pertaining to the request, response and indication data flow that
+ * happens between user and kernel spaces.
+ */
+void msm_ipc_router_ipc_log_init(void)
+{
+	ipc_req_resp_log_txt =
+		ipc_log_context_create(REQ_RESP_IPC_LOG_PAGES, "req_resp");
+	if (!ipc_req_resp_log_txt) {
+		pr_err("%s: Unable to create IPC logging for Req/Resp",
+			__func__);
+	}
+	ipc_ind_log_txt =
+		ipc_log_context_create(IND_IPC_LOG_PAGES, "indication");
+	if (!ipc_ind_log_txt) {
+		pr_err("%s: Unable to create IPC logging for Indications",
+			__func__);
+	}
+}
+
 int msm_ipc_router_init_sockets(void)
 {
 	int ret;
@@ -528,6 +644,7 @@
 	}
 
 	sockets_enabled = 1;
+	msm_ipc_router_ipc_log_init();
 out_init_sockets:
 	return ret;
 }
diff --git a/arch/arm/mach-msm/krait-regulator.c b/arch/arm/mach-msm/krait-regulator.c
index 0c1e279..dc0b755 100644
--- a/arch/arm/mach-msm/krait-regulator.c
+++ b/arch/arm/mach-msm/krait-regulator.c
@@ -155,6 +155,7 @@
 	bool			pfm_mode;
 	int			pmic_min_uV_for_retention;
 	bool			retention_enabled;
+	bool			use_phase_switching;
 };
 
 static struct pmic_gang_vreg *the_gang;
@@ -390,13 +391,17 @@
 	return 0;
 }
 
-static int set_pmic_gang_phases(int phase_count)
+static int set_pmic_gang_phases(struct pmic_gang_vreg *pvreg, int phase_count)
 {
-	/*
-	 * TODO : spm writes for phase control,
-	 * pmic phase control is not working yet
-	 */
-	return 0;
+	pr_debug("programming phase_count = %d\n", phase_count);
+	if (pvreg->use_phase_switching)
+		/*
+		 * note the PMIC sets the phase count to one more than
+		 * the value in the register - hence subtract 1 from it
+		 */
+		return msm_spm_apcs_set_phase(phase_count - 1);
+	else
+		return 0;
 }
 
 static int set_pmic_gang_voltage(struct pmic_gang_vreg *pvreg, int uV)
@@ -547,14 +552,19 @@
 				int load_uA)
 {
 	struct pmic_gang_vreg *pvreg = from->pvreg;
-	int phase_count = DIV_ROUND_UP(load_uA, LOAD_PER_PHASE) - 1;
+	int phase_count = DIV_ROUND_UP(load_uA, LOAD_PER_PHASE);
 	int rc = 0;
 
-	if (phase_count < 0)
-		phase_count = 0;
+	if (phase_count <= 0)
+		phase_count = 1;
+
+	/* Increase phases if it is less than the number of cpus online */
+	if (phase_count < num_online_cpus())
+		phase_count = num_online_cpus();
 
 	if (phase_count != pvreg->pmic_phase_count) {
-		rc = set_pmic_gang_phases(phase_count);
+		rc = set_pmic_gang_phases(pvreg, phase_count);
 		if (rc < 0) {
 			dev_err(&from->rdev->dev,
 				"%s failed set phase %d rc = %d\n",
@@ -577,32 +587,6 @@
 	return rc;
 }
 
-static int __devinit pvreg_init(struct platform_device *pdev)
-{
-	struct pmic_gang_vreg *pvreg;
-
-	pvreg = devm_kzalloc(&pdev->dev,
-			sizeof(struct pmic_gang_vreg), GFP_KERNEL);
-	if (!pvreg) {
-		pr_err("kzalloc failed.\n");
-		return -ENOMEM;
-	}
-
-	pvreg->name = "pmic_gang";
-	pvreg->pmic_vmax_uV = PMIC_VOLTAGE_MIN;
-	pvreg->pmic_phase_count = 1;
-	pvreg->retention_enabled = true;
-	pvreg->pmic_min_uV_for_retention = INT_MAX;
-
-	mutex_init(&pvreg->krait_power_vregs_lock);
-	INIT_LIST_HEAD(&pvreg->krait_power_vregs);
-	the_gang = pvreg;
-
-	pr_debug("name=%s inited\n", pvreg->name);
-
-	return 0;
-}
-
 static int krait_power_get_voltage(struct regulator_dev *rdev)
 {
 	struct krait_power_vreg *kvreg = rdev_get_drvdata(rdev);
@@ -925,24 +909,6 @@
 	int ldo_delta_uV;
 	int cpu_num;
 
-	/* Initialize the pmic gang if it hasn't been initialized already */
-	if (the_gang == NULL) {
-		rc = pvreg_init(pdev);
-		if (rc < 0) {
-			dev_err(&pdev->dev,
-				"failed to init pmic gang rc = %d\n", rc);
-			return rc;
-		}
-		/* global initializtion */
-		glb_init(pdev);
-	}
-
-	if (dent == NULL) {
-		dent = debugfs_create_dir(KRAIT_REGULATOR_DRIVER_NAME, NULL);
-		debugfs_create_file("retention_uV",
-				0644, dent, the_gang, &retention_fops);
-	}
-
 	if (pdev->dev.of_node) {
 		/* Get init_data from device tree. */
 		init_data = of_get_regulator_init_data(&pdev->dev,
@@ -1139,14 +1105,93 @@
 	},
 };
 
+static struct of_device_id krait_pdn_match_table[] = {
+	{ .compatible = "qcom,krait-pdn", },
+	{}
+};
+
+static int __devinit krait_pdn_probe(struct platform_device *pdev)
+{
+	int rc;
+	bool use_phase_switching = false;
+	struct device *dev = &pdev->dev;
+	struct device_node *node = dev->of_node;
+	struct pmic_gang_vreg *pvreg;
+
+	if (!dev->of_node) {
+		dev_err(dev, "device tree information missing\n");
+		return -ENODEV;
+	}
+
+	use_phase_switching = of_property_read_bool(node,
+						"qcom,use-phase-switching");
+	pvreg = devm_kzalloc(&pdev->dev,
+			sizeof(struct pmic_gang_vreg), GFP_KERNEL);
+	if (!pvreg) {
+		pr_err("kzalloc failed.\n");
+		return -ENOMEM;
+	}
+
+	pvreg->name = "pmic_gang";
+	pvreg->pmic_vmax_uV = PMIC_VOLTAGE_MIN;
+	pvreg->pmic_phase_count = -EINVAL;
+	pvreg->retention_enabled = true;
+	pvreg->pmic_min_uV_for_retention = INT_MAX;
+	pvreg->use_phase_switching = use_phase_switching;
+
+	mutex_init(&pvreg->krait_power_vregs_lock);
+	INIT_LIST_HEAD(&pvreg->krait_power_vregs);
+	the_gang = pvreg;
+
+	pr_debug("name=%s inited\n", pvreg->name);
+
+	/* global initialization */
+	glb_init(pdev);
+
+	rc = of_platform_populate(node, NULL, NULL, dev);
+	if (rc) {
+		dev_err(dev, "failed to add child nodes, rc=%d\n", rc);
+		return rc;
+	}
+
+	dent = debugfs_create_dir(KRAIT_REGULATOR_DRIVER_NAME, NULL);
+	debugfs_create_file("retention_uV",
+			0644, dent, the_gang, &retention_fops);
+	return 0;
+}
+
+static int __devexit krait_pdn_remove(struct platform_device *pdev)
+{
+	the_gang = NULL;
+	debugfs_remove_recursive(dent);
+	return 0;
+}
+
+static struct platform_driver krait_pdn_driver = {
+	.probe	= krait_pdn_probe,
+	.remove	= __devexit_p(krait_pdn_remove),
+	.driver	= {
+		.name		= KRAIT_PDN_DRIVER_NAME,
+		.of_match_table	= krait_pdn_match_table,
+		.owner		= THIS_MODULE,
+	},
+};
+
 int __init krait_power_init(void)
 {
-	return platform_driver_register(&krait_power_driver);
+	int rc = platform_driver_register(&krait_power_driver);
+	if (rc) {
+		pr_err("failed to add %s driver rc = %d\n",
+				KRAIT_REGULATOR_DRIVER_NAME, rc);
+		return rc;
+	}
+	return platform_driver_register(&krait_pdn_driver);
 }
 
 static void __exit krait_power_exit(void)
 {
 	platform_driver_unregister(&krait_power_driver);
+	platform_driver_unregister(&krait_pdn_driver);
 }
 module_exit(krait_power_exit);
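
The phase selection above rounds the requested load up to whole phases, never drops below one phase or below the number of online CPUs, and programs the PMIC with one less than the chosen count. A standalone sketch of that arithmetic, with a made-up LOAD_PER_PHASE used purely for illustration:

/* hypothetical per-phase capacity; the real LOAD_PER_PHASE is driver-defined */
#define EXAMPLE_LOAD_PER_PHASE	3200000

#define EXAMPLE_DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

static int example_phase_register_value(int load_uA, int cpus_online)
{
	int phase_count = EXAMPLE_DIV_ROUND_UP(load_uA, EXAMPLE_LOAD_PER_PHASE);

	if (phase_count <= 0)
		phase_count = 1;

	/* never run with fewer phases than online CPUs */
	if (phase_count < cpus_online)
		phase_count = cpus_online;

	/* the PMIC takes one less than the desired phase count */
	return phase_count - 1;
}
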
 
diff --git a/arch/arm/mach-msm/memory.c b/arch/arm/mach-msm/memory.c
index 90cb49e..806581d 100644
--- a/arch/arm/mach-msm/memory.c
+++ b/arch/arm/mach-msm/memory.c
@@ -1,7 +1,7 @@
 /* arch/arm/mach-msm/memory.c
  *
  * Copyright (C) 2007 Google, Inc.
- * Copyright (c) 2009-2012, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2009-2013, The Linux Foundation. All rights reserved.
  *
  * This software is licensed under the terms of the GNU General Public
  * License version 2, as published by the Free Software Foundation, and
@@ -507,11 +507,10 @@
  */
 void adjust_meminfo(unsigned long start, unsigned long size)
 {
-	int i, j;
+	int i;
 
-	for (i = 0, j = 0; i < meminfo.nr_banks; i++) {
-		struct membank *bank = &meminfo.bank[j];
-		*bank = meminfo.bank[i];
+	for (i = 0; i < meminfo.nr_banks; i++) {
+		struct membank *bank = &meminfo.bank[i];
 
 		if (((start + size) <= (bank->start + bank->size)) &&
 			(start >= bank->start)) {
@@ -519,15 +518,15 @@
 				(meminfo.nr_banks - i) * sizeof(*bank));
 			meminfo.nr_banks++;
 			i++;
-			bank[1].size -= (start + size);
-			bank[1].start = (start + size);
-			bank[1].highmem = 0;
-			j++;
+
 			bank->size = start - bank->start;
+			bank[1].start = (start + size);
+			bank[1].size -= (bank->size + size);
+			bank[1].highmem = 0;
 		}
-		j++;
 	}
 }
+
 unsigned long get_ddr_size(void)
 {
 	unsigned int i;
diff --git a/arch/arm/mach-msm/msm_ipc_router_security.c b/arch/arm/mach-msm/msm_ipc_router_security.c
index 756e24e..69efd13 100644
--- a/arch/arm/mach-msm/msm_ipc_router_security.c
+++ b/arch/arm/mach-msm/msm_ipc_router_security.c
@@ -37,7 +37,7 @@
 	uint32_t instance_id;
 	unsigned reserved;
 	int num_group_info;
-	int *group_id;
+	gid_t *group_id;
 };
 
 static DEFINE_MUTEX(security_rules_lock);
@@ -98,6 +98,7 @@
 	struct config_sec_rules_args sec_rules_arg;
 	struct security_rule *rule, *temp_rule;
 	int key;
+	int group_info_sz;
 	int ret;
 
 	if (current_euid())
@@ -111,14 +112,20 @@
 	if (sec_rules_arg.num_group_info <= 0)
 		return -EINVAL;
 
+	group_info_sz = sec_rules_arg.num_group_info * sizeof(gid_t);
+	if ((group_info_sz / sizeof(gid_t)) != sec_rules_arg.num_group_info) {
+		pr_err("%s: Integer Overflow %zu * %d\n", __func__,
+			sizeof(gid_t), sec_rules_arg.num_group_info);
+		return -EINVAL;
+	}
+
 	rule = kzalloc(sizeof(struct security_rule), GFP_KERNEL);
 	if (!rule) {
 		pr_err("%s: security_rule alloc failed\n", __func__);
 		return -ENOMEM;
 	}
 
-	rule->group_id = kzalloc((sec_rules_arg.num_group_info * sizeof(int)),
-				 GFP_KERNEL);
+	rule->group_id = kzalloc(group_info_sz, GFP_KERNEL);
 	if (!rule->group_id) {
 		pr_err("%s: group_id alloc failed\n", __func__);
 		kfree(rule);
@@ -131,7 +138,7 @@
 	rule->num_group_info = sec_rules_arg.num_group_info;
 	ret = copy_from_user(rule->group_id,
 			     ((void *)(arg + sizeof(sec_rules_arg))),
-			     (rule->num_group_info * sizeof(uint32_t)));
+			     group_info_sz);
 	if (ret) {
 		kfree(rule->group_id);
 		kfree(rule);
diff --git a/arch/arm/mach-msm/peripheral-loader.c b/arch/arm/mach-msm/peripheral-loader.c
index affb451..fc9a0fa 100644
--- a/arch/arm/mach-msm/peripheral-loader.c
+++ b/arch/arm/mach-msm/peripheral-loader.c
@@ -656,7 +656,8 @@
 void pil_shutdown(struct pil_desc *desc)
 {
 	struct pil_priv *priv = desc->priv;
-	desc->ops->shutdown(desc);
+	if (desc->ops->shutdown)
+		desc->ops->shutdown(desc);
 	if (proxy_timeout_ms == 0 && desc->ops->proxy_unvote)
 		desc->ops->proxy_unvote(desc);
 	else
diff --git a/arch/arm/mach-msm/pil-q6v5-mss.c b/arch/arm/mach-msm/pil-q6v5-mss.c
index c1d4ab4..aa42f5b 100644
--- a/arch/arm/mach-msm/pil-q6v5-mss.c
+++ b/arch/arm/mach-msm/pil-q6v5-mss.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2012, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -57,7 +57,7 @@
 #define RMB_PMI_CODE_LENGTH		0x18
 
 #define VDD_MSS_UV			1050000
-#define MAX_VDD_MX_UV			1050000
+#define MAX_VDD_MX_UV			1150000
 
 #define PROXY_TIMEOUT_MS		10000
 #define POLL_INTERVAL_US		50
@@ -462,7 +462,7 @@
 
 	if (!drv->is_loadable)
 		return 0;
-	/* MBA doesn't support shutdown */
+	pil_shutdown(&drv->desc);
 	pil_shutdown(&drv->q6->desc);
 	return 0;
 }
@@ -578,7 +578,7 @@
 	if (!drv->is_loadable)
 		return;
 
-	/* MBA doesn't support shutdown */
+	pil_shutdown(&drv->desc);
 	pil_shutdown(&drv->q6->desc);
 }
 
diff --git a/arch/arm/mach-msm/pm.h b/arch/arm/mach-msm/pm.h
index af0744c..c77304d 100644
--- a/arch/arm/mach-msm/pm.h
+++ b/arch/arm/mach-msm/pm.h
@@ -65,6 +65,12 @@
 	uint32_t modified_time_us;
 };
 
+struct msm_pm_sleep_status_data {
+	void *base_addr;
+	uint32_t cpu_offset;
+	uint32_t mask;
+};
+
 struct msm_pm_platform_data {
 	u8 idle_supported;   /* Allow device to enter mode during idle */
 	u8 suspend_supported; /* Allow device to enter mode during suspend */
diff --git a/arch/arm/mach-msm/qdsp6v2/audio_multi_aac.c b/arch/arm/mach-msm/qdsp6v2/audio_multi_aac.c
index 658c07b..8153145 100644
--- a/arch/arm/mach-msm/qdsp6v2/audio_multi_aac.c
+++ b/arch/arm/mach-msm/qdsp6v2/audio_multi_aac.c
@@ -185,6 +185,25 @@
 		}
 		break;
 	}
+	case AUDIO_SET_AAC_MIX_CONFIG:	{
+		pr_debug("%s, AUDIO_SET_AAC_MIX_CONFIG", __func__);
+		if (copy_from_user(audio->codec_cfg, (void *)arg,
+			sizeof(unsigned long))) {
+			rc = -EFAULT;
+			break;
+		} else {
+			unsigned long *mix_coeff =
+					(unsigned long *)audio->codec_cfg;
+			pr_debug("%s, value of coeff = %lu",
+						__func__, *mix_coeff);
+			rc = q6asm_cfg_aac_sel_mix_coef(audio->ac, *mix_coeff);
+			if (rc < 0)
+				pr_err("%s asm aac_sel_mix_coef failed rc=%d\n",
+								 __func__, rc);
+			break;
+		}
+		break;
+	}
 	default:
 		pr_debug("Calling utils ioctl\n");
 		rc = audio->codec_ioctl(file, cmd, arg);
diff --git a/arch/arm/mach-msm/qdsp6v2/ultrasound/version_b/q6usm_b.c b/arch/arm/mach-msm/qdsp6v2/ultrasound/version_b/q6usm_b.c
index 11b1405..ff7ba33 100644
--- a/arch/arm/mach-msm/qdsp6v2/ultrasound/version_b/q6usm_b.c
+++ b/arch/arm/mach-msm/qdsp6v2/ultrasound/version_b/q6usm_b.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -327,6 +327,7 @@
 
 	return usc;
 fail:
+	kfree(p_mem_handle);
 	q6usm_us_client_free(usc);
 	return NULL;
 fail_session:
diff --git a/arch/arm/mach-msm/remote_spinlock.c b/arch/arm/mach-msm/remote_spinlock.c
index 4e09a9e..94923a0 100644
--- a/arch/arm/mach-msm/remote_spinlock.c
+++ b/arch/arm/mach-msm/remote_spinlock.c
@@ -196,6 +196,8 @@
 /* end swp implementation --------------------------------------------------- */
 
 /* ldrex implementation ----------------------------------------------------- */
+static char *ldrex_compatible_string = "qcom,ipc-spinlock-ldrex";
+
 static void __raw_remote_ex_spin_lock(raw_remote_spinlock_t *lock)
 {
 	unsigned long tmp;
@@ -267,7 +269,7 @@
 static void *hw_mutex_reg_base;
 static DEFINE_MUTEX(hw_map_init_lock);
 
-static char *compatible_string = "qcom,ipc-spinlock";
+static char *sfpb_compatible_string = "qcom,ipc-spinlock-sfpb";
 
 static int init_hw_mutex(struct device_node *node)
 {
@@ -294,7 +296,7 @@
 {
 	struct device_node *node;
 
-	node = of_find_compatible_node(NULL, NULL, compatible_string);
+	node = of_find_compatible_node(NULL, NULL, sfpb_compatible_string);
 	if (node) {
 		init_hw_mutex(node);
 	} else {
@@ -341,7 +343,9 @@
 
 static int __raw_remote_sfpb_spin_trylock(raw_remote_spinlock_t *lock)
 {
-	return 1;
+	writel_relaxed(SPINLOCK_PID_APPS, lock);
+	smp_mb();
+	return readl_relaxed(lock) == SPINLOCK_PID_APPS;
 }
 
 static void __raw_remote_sfpb_spin_unlock(raw_remote_spinlock_t *lock)
@@ -397,6 +401,23 @@
 }
 
 
+static int dt_node_is_valid(const struct device_node *node)
+{
+	const char *status;
+	int statlen;
+
+	status = of_get_property(node, "status", &statlen);
+	if (status == NULL)
+		return 1;
+
+	if (statlen > 0) {
+		if (!strcmp(status, "okay") || !strcmp(status, "ok"))
+			return 1;
+	}
+
+	return 0;
+}
+
 static void initialize_ops(void)
 {
 	struct device_node *node;
@@ -435,23 +456,42 @@
 		is_hw_lock_type = 1;
 		break;
 	case AUTO_MODE:
-		node = of_find_compatible_node(NULL, NULL, compatible_string);
-		if (node) {
+		/*
+		 * of_find_compatible_node() returns a valid pointer even if
+		 * the status property is "disabled", so the validity needs
+		 * to be checked
+		 */
+		node = of_find_compatible_node(NULL, NULL,
+						sfpb_compatible_string);
+		if (node && dt_node_is_valid(node)) {
 			current_ops.lock = __raw_remote_sfpb_spin_lock;
 			current_ops.unlock = __raw_remote_sfpb_spin_unlock;
 			current_ops.trylock = __raw_remote_sfpb_spin_trylock;
 			current_ops.release = __raw_remote_gen_spin_release;
 			current_ops.owner = __raw_remote_gen_spin_owner;
 			is_hw_lock_type = 1;
-		} else {
+			break;
+		}
+
+		node = of_find_compatible_node(NULL, NULL,
+						ldrex_compatible_string);
+		if (node && dt_node_is_valid(node)) {
 			current_ops.lock = __raw_remote_ex_spin_lock;
 			current_ops.unlock = __raw_remote_ex_spin_unlock;
 			current_ops.trylock = __raw_remote_ex_spin_trylock;
 			current_ops.release = __raw_remote_gen_spin_release;
 			current_ops.owner = __raw_remote_gen_spin_owner;
 			is_hw_lock_type = 0;
-			pr_warn("Falling back to LDREX remote spinlock implementation");
+			break;
 		}
+
+		current_ops.lock = __raw_remote_ex_spin_lock;
+		current_ops.unlock = __raw_remote_ex_spin_unlock;
+		current_ops.trylock = __raw_remote_ex_spin_trylock;
+		current_ops.release = __raw_remote_gen_spin_release;
+		current_ops.owner = __raw_remote_gen_spin_owner;
+		is_hw_lock_type = 0;
+		pr_warn("Falling back to LDREX remote spinlock implementation");
 		break;
 	default:
 		BUG();
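
Example (not part of the diff): the node-validity check that AUTO_MODE now performs can be
expressed as a small standalone helper using only of_find_compatible_node() and
of_get_property(); the helper name is illustrative.

#include <linux/of.h>
#include <linux/string.h>

/* True if a node with the given compatible exists and is not status-disabled. */
static bool example_node_usable(const char *compat)
{
	struct device_node *node;
	const char *status;
	bool usable;

	node = of_find_compatible_node(NULL, NULL, compat);
	if (!node)
		return false;

	/* A missing "status" property means the node is enabled. */
	status = of_get_property(node, "status", NULL);
	usable = !status || !strcmp(status, "okay") || !strcmp(status, "ok");

	of_node_put(node);
	return usable;
}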
diff --git a/arch/arm/mach-msm/smd.c b/arch/arm/mach-msm/smd.c
index 8725544..10e40b4 100644
--- a/arch/arm/mach-msm/smd.c
+++ b/arch/arm/mach-msm/smd.c
@@ -72,6 +72,7 @@
 #define SMD_VERSION 0x00020000
 #define SMSM_SNAPSHOT_CNT 64
 #define SMSM_SNAPSHOT_SIZE ((SMSM_NUM_ENTRIES + 1) * 4)
+#define RSPIN_INIT_WAIT_MS 1000
 
 uint32_t SMSM_NUM_ENTRIES = 8;
 uint32_t SMSM_NUM_HOSTS = 3;
@@ -2534,6 +2535,18 @@
 	struct smem_shared *shared = (void *) MSM_SHARED_RAM_BASE;
 	int i;
 	struct smsm_size_info_type *smsm_size_info;
+	unsigned long flags;
+	unsigned long j_start;
+
+	/* Verify that remote spinlock is not deadlocked */
+	j_start = jiffies;
+	while (!remote_spin_trylock_irqsave(&remote_spinlock, flags)) {
+		if (jiffies_to_msecs(jiffies - j_start) > RSPIN_INIT_WAIT_MS) {
+			panic("%s: Remote processor %d will not release spinlock\n",
+				__func__, remote_spin_owner(&remote_spinlock));
+		}
+	}
+	remote_spin_unlock_irqrestore(&remote_spinlock, flags);
 
 	smsm_size_info = smem_alloc(SMEM_SMSM_SIZE_INFO,
 				sizeof(struct smsm_size_info_type));
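
Example (not part of the diff): the same bounded-trylock idiom used above, packaged as a
helper that reports a timeout instead of panicking. The function name and timeout handling
are illustrative, and the remote_spinlock_t type and header path are assumed to be the ones
used elsewhere in mach-msm.

#include <linux/errno.h>
#include <linux/jiffies.h>
#include <mach/remote_spinlock.h>

static int example_wait_for_remote_spinlock(remote_spinlock_t *lock,
					    unsigned int timeout_ms)
{
	unsigned long flags;
	unsigned long j_start = jiffies;

	while (!remote_spin_trylock_irqsave(lock, flags)) {
		if (jiffies_to_msecs(jiffies - j_start) > timeout_ms)
			return -ETIMEDOUT;
	}
	remote_spin_unlock_irqrestore(lock, flags);

	return 0;
}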
diff --git a/arch/arm/mach-msm/smd_debug.c b/arch/arm/mach-msm/smd_debug.c
index 9206016..4dcf72f 100644
--- a/arch/arm/mach-msm/smd_debug.c
+++ b/arch/arm/mach-msm/smd_debug.c
@@ -1,7 +1,7 @@
 /* arch/arm/mach-msm/smd_debug.c
  *
  * Copyright (C) 2007 Google, Inc.
- * Copyright (c) 2009-2012, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2009-2013, The Linux Foundation. All rights reserved.
  * Author: Brian Swetland <swetland@google.com>
  *
  * This software is licensed under the terms of the GNU General Public
@@ -103,14 +103,14 @@
 	const char *subsys_name;
 
 	i += scnprintf(buf + i, max - i,
-		"   Subsystem    | Interrupt ID |     In    | Out (Hardcoded) |"
-		" Out (Configured) |\n");
+		"   Subsystem    | Interrupt ID |    In     | Out (Hardcoded) |"
+		" Out (Configured)|\n");
 
 	for (subsys = 0; subsys < NUM_SMD_SUBSYSTEMS; ++subsys) {
 		subsys_name = smd_pid_to_subsystem(subsys);
 		if (subsys_name) {
 			i += scnprintf(buf + i, max - i,
-				"%-10s %4s |    %9d | %9u |       %9u |        %9u |\n",
+				"%-10s %4s |    %9d | %9u |       %9u |       %9u |\n",
 				smd_pid_to_subsystem(subsys), "smd",
 				stats->smd_interrupt_id,
 				stats->smd_in_count,
@@ -118,7 +118,7 @@
 				stats->smd_out_config_count);
 
 			i += scnprintf(buf + i, max - i,
-				"%-10s %4s |    %9d | %9u |       %9u |        %9u |\n",
+				"%-10s %4s |    %9d | %9u |       %9u |       %9u |\n",
 				smd_pid_to_subsystem(subsys), "smsm",
 				stats->smsm_interrupt_id,
 				stats->smsm_in_count,
diff --git a/arch/arm/mach-msm/smd_tty.c b/arch/arm/mach-msm/smd_tty.c
index 1820b23..5969a3c 100644
--- a/arch/arm/mach-msm/smd_tty.c
+++ b/arch/arm/mach-msm/smd_tty.c
@@ -47,7 +47,7 @@
 
 struct smd_tty_info {
 	smd_channel_t *ch;
-	struct tty_struct *tty;
+	struct tty_port port;
 	struct wake_lock wake_lock;
 	int open_count;
 	struct tasklet_struct tty_tsklt;
@@ -125,7 +125,7 @@
 	unsigned char *ptr;
 	int avail;
 	struct smd_tty_info *info = (struct smd_tty_info *)param;
-	struct tty_struct *tty = info->tty;
+	struct tty_struct *tty = tty_port_tty_get(&info->port);
 	unsigned long flags;
 
 	if (!tty)
@@ -156,6 +156,7 @@
 		if (avail <= 0) {
 			mod_timer(&info->buf_req_timer,
 					jiffies + msecs_to_jiffies(30));
+			tty_kref_put(tty);
 			return;
 		}
 
@@ -173,11 +174,13 @@
 
 	/* XXX only when writable and necessary */
 	tty_wakeup(tty);
+	tty_kref_put(tty);
 }
 
 static void smd_tty_notify(void *priv, unsigned event)
 {
 	struct smd_tty_info *info = priv;
+	struct tty_struct *tty;
 	unsigned long flags;
 
 	switch (event) {
@@ -195,8 +198,10 @@
 		 */
 		if (smd_write_avail(info->ch)) {
 			smd_disable_read_intr(info->ch);
-			if (info->tty)
-				wake_up_interruptible(&info->tty->write_wait);
+			tty = tty_port_tty_get(&info->port);
+			if (tty)
+				wake_up_interruptible(&tty->write_wait);
+			tty_kref_put(tty);
 		}
 		spin_lock_irqsave(&info->ra_lock, flags);
 		if (smd_read_avail(info->ch)) {
@@ -225,9 +230,11 @@
 		/* schedule task to send TTY_BREAK */
 		tasklet_hi_schedule(&info->tty_tsklt);
 
-		if (info->tty->index == LOOPBACK_IDX)
+		tty = tty_port_tty_get(&info->port);
+		if (tty && tty->index == LOOPBACK_IDX)
 			schedule_delayed_work(&loopback_work,
 					msecs_to_jiffies(1000));
+		tty_kref_put(tty);
 		break;
 	}
 }
@@ -241,7 +248,8 @@
 	return (modem_state & ready_state) == ready_state;
 }
 
-static int smd_tty_open(struct tty_struct *tty, struct file *f)
+static int smd_tty_port_activate(struct tty_port *tport,
+				 struct tty_struct *tty)
 {
 	int res = 0;
 	unsigned int n = tty->index;
@@ -306,8 +314,6 @@
 			}
 		}
 
-
-		info->tty = tty;
 		tasklet_init(&info->tty_tsklt, smd_tty_read,
 			     (unsigned long)info);
 		wake_lock_init(&info->wake_lock, WAKE_LOCK_SUSPEND,
@@ -354,24 +360,27 @@
 	return res;
 }
 
-static void smd_tty_close(struct tty_struct *tty, struct file *f)
+static void smd_tty_port_shutdown(struct tty_port *tport)
 {
-	struct smd_tty_info *info = tty->driver_data;
+	struct smd_tty_info *info;
+	struct tty_struct *tty = tty_port_tty_get(tport);
 	unsigned long flags;
 
-	if (info == 0)
+	info = tty->driver_data;
+	if (info == 0) {
+		tty_kref_put(tty);
 		return;
+	}
 
 	mutex_lock(&smd_tty_lock);
 	if (--info->open_count == 0) {
 		spin_lock_irqsave(&info->reset_lock, flags);
 		info->is_open = 0;
 		spin_unlock_irqrestore(&info->reset_lock, flags);
-		if (info->tty) {
+		if (tty) {
 			tasklet_kill(&info->tty_tsklt);
 			wake_lock_destroy(&info->wake_lock);
 			wake_lock_destroy(&info->ra_wake_lock);
-			info->tty = 0;
 		}
 		tty->driver_data = 0;
 		del_timer(&info->buf_req_timer);
@@ -382,6 +391,21 @@
 		}
 	}
 	mutex_unlock(&smd_tty_lock);
+	tty_kref_put(tty);
+}
+
+static int smd_tty_open(struct tty_struct *tty, struct file *f)
+{
+	struct smd_tty_info *info = smd_tty + tty->index;
+
+	return tty_port_open(&info->port, tty, f);
+}
+
+static void smd_tty_close(struct tty_struct *tty, struct file *f)
+{
+	struct smd_tty_info *info = tty->driver_data;
+
+	tty_port_close(&info->port, tty, f);
 }
 
 static int smd_tty_write(struct tty_struct *tty, const unsigned char *buf, int len)
@@ -482,6 +506,11 @@
 			  0, SMSM_SMD_LOOPBACK);
 }
 
+static const struct tty_port_operations smd_tty_port_ops = {
+	.shutdown = smd_tty_port_shutdown,
+	.activate = smd_tty_port_activate,
+};
+
 static struct tty_operations smd_tty_ops = {
 	.open = smd_tty_open,
 	.close = smd_tty_close,
@@ -523,6 +552,7 @@
 	int ret;
 	int n;
 	int idx;
+	struct tty_port *port;
 
 	smd_tty_driver = alloc_tty_driver(MAX_SMD_TTYS);
 	if (smd_tty_driver == 0)
@@ -578,6 +608,10 @@
 				continue;
 		}
 
+		port = &smd_tty[idx].port;
+		tty_port_init(port);
+		port->ops = &smd_tty_port_ops;
+		/* TODO: For kernel >= 3.7 use tty_port_register_device */
 		tty_register_device(smd_tty_driver, idx, 0);
 		init_completion(&smd_tty[idx].ch_allocated);
 
diff --git a/arch/arm/mach-msm/smp2p_debug.c b/arch/arm/mach-msm/smp2p_debug.c
index 1a5c96e..a493cbe 100644
--- a/arch/arm/mach-msm/smp2p_debug.c
+++ b/arch/arm/mach-msm/smp2p_debug.c
@@ -233,7 +233,7 @@
 	if (in_ptr) {
 		in_entries = (struct smp2p_entry_v1 *)((void *)in_ptr +
 				sizeof(struct smp2p_smem));
-		in_valid = SMP2P_GET_ENT_VALID(out_ptr->valid_total_ent);
+		in_valid = SMP2P_GET_ENT_VALID(in_ptr->valid_total_ent);
 	}
 
 	for (entry = 0; out_entries || in_entries; ++entry) {
diff --git a/arch/arm/mm/Makefile b/arch/arm/mm/Makefile
index 6314e94..d177b05 100644
--- a/arch/arm/mm/Makefile
+++ b/arch/arm/mm/Makefile
@@ -7,7 +7,6 @@
 
 obj-$(CONFIG_MMU)		+= fault-armv.o flush.o idmap.o ioremap.o \
 				   mmap.o pgd.o mmu.o vmregion.o
-obj-$(CONFIG_DEBUG_RODATA)	+= rodata.o
 
 ifneq ($(CONFIG_MMU),y)
 obj-y				+= nommu.o
diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c
index 0ebc2b9..bf59a9d 100644
--- a/arch/arm/mm/init.c
+++ b/arch/arm/mm/init.c
@@ -224,7 +224,7 @@
  * allocations.  This must be the smallest DMA mask in the system,
  * so a successful GFP_DMA allocation will always satisfy this.
  */
-u32 arm_dma_limit;
+phys_addr_t arm_dma_limit;
 
 static void __init arm_adjust_dma_zone(unsigned long *size, unsigned long *hole,
 	unsigned long dma_size)
diff --git a/arch/arm/mm/mm.h b/arch/arm/mm/mm.h
index 8877ddd..21653f2 100644
--- a/arch/arm/mm/mm.h
+++ b/arch/arm/mm/mm.h
@@ -65,9 +65,9 @@
 #endif
 
 #ifdef CONFIG_ZONE_DMA
-extern u32 arm_dma_limit;
+extern phys_addr_t arm_dma_limit;
 #else
-#define arm_dma_limit ((u32)~0)
+#define arm_dma_limit ((phys_addr_t)~0)
 #endif
 
 struct map_desc;
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index 8575f78..25cb67c 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -604,53 +604,30 @@
 	return early_alloc_aligned(sz, sz);
 }
 
-static pte_t * __init early_pte_alloc(pmd_t *pmd)
-{
-	if (pmd_none(*pmd) || pmd_bad(*pmd))
-		return early_alloc(PTE_HWTABLE_OFF + PTE_HWTABLE_SIZE);
-	return pmd_page_vaddr(*pmd);
-}
-
-static void __init early_pte_install(pmd_t *pmd, pte_t *pte, unsigned long prot)
-{
-	__pmd_populate(pmd, __pa(pte), prot);
-	BUG_ON(pmd_bad(*pmd));
-}
-
-#ifdef CONFIG_HIGHMEM
-static pte_t * __init early_pte_alloc_and_install(pmd_t *pmd,
-	unsigned long addr, unsigned long prot)
+static pte_t * __init early_pte_alloc(pmd_t *pmd, unsigned long addr, unsigned long prot)
 {
 	if (pmd_none(*pmd)) {
-		pte_t *pte = early_pte_alloc(pmd);
-		early_pte_install(pmd, pte, prot);
+		pte_t *pte = early_alloc(PTE_HWTABLE_OFF + PTE_HWTABLE_SIZE);
+		__pmd_populate(pmd, __pa(pte), prot);
 	}
 	BUG_ON(pmd_bad(*pmd));
 	return pte_offset_kernel(pmd, addr);
 }
-#endif
 
 static void __init alloc_init_pte(pmd_t *pmd, unsigned long addr,
 				  unsigned long end, unsigned long pfn,
 				  const struct mem_type *type)
 {
-	pte_t *start_pte = early_pte_alloc(pmd);
-	pte_t *pte = start_pte + pte_index(addr);
-
-	/* If replacing a section mapping, the whole section must be replaced */
-	BUG_ON(pmd_bad(*pmd) && ((addr | end) & ~PMD_MASK));
-
+	pte_t *pte = early_pte_alloc(pmd, addr, type->prot_l1);
 	do {
 		set_pte_ext(pte, pfn_pte(pfn, __pgprot(type->prot_pte)), 0);
 		pfn++;
 	} while (pte++, addr += PAGE_SIZE, addr != end);
-	early_pte_install(pmd, start_pte, type->prot_l1);
 }
 
 static void __init alloc_init_section(pud_t *pud, unsigned long addr,
 				      unsigned long end, phys_addr_t phys,
-				      const struct mem_type *type,
-				      bool force_pages)
+				      const struct mem_type *type)
 {
 	pmd_t *pmd = pmd_offset(pud, addr);
 
@@ -660,7 +637,7 @@
 	 * L1 entries, whereas PGDs refer to a group of L1 entries making
 	 * up one logical pointer to an L2 table.
 	 */
-	if (type->prot_sect && ((addr | end | phys) & ~SECTION_MASK) == 0 && !force_pages) {
+	if (type->prot_sect && ((addr | end | phys) & ~SECTION_MASK) == 0) {
 		pmd_t *p = pmd;
 
 #ifndef CONFIG_ARM_LPAE
@@ -684,15 +661,14 @@
 }
 
 static void __init alloc_init_pud(pgd_t *pgd, unsigned long addr,
-	unsigned long end, unsigned long phys, const struct mem_type *type,
-	bool force_pages)
+	unsigned long end, unsigned long phys, const struct mem_type *type)
 {
 	pud_t *pud = pud_offset(pgd, addr);
 	unsigned long next;
 
 	do {
 		next = pud_addr_end(addr, end);
-		alloc_init_section(pud, addr, next, phys, type, force_pages);
+		alloc_init_section(pud, addr, next, phys, type);
 		phys += next - addr;
 	} while (pud++, addr = next, addr != end);
 }
@@ -766,7 +742,7 @@
  * offsets, and we take full advantage of sections and
  * supersections.
  */
-static void __init create_mapping(struct map_desc *md, bool force_pages)
+static void __init create_mapping(struct map_desc *md)
 {
 	unsigned long addr, length, end;
 	phys_addr_t phys;
@@ -818,7 +794,7 @@
 	do {
 		unsigned long next = pgd_addr_end(addr, end);
 
-		alloc_init_pud(pgd, addr, next, phys, type, force_pages);
+		alloc_init_pud(pgd, addr, next, phys, type);
 
 		phys += next - addr;
 		addr = next;
@@ -839,7 +815,7 @@
 	vm = early_alloc_aligned(sizeof(*vm) * nr, __alignof__(*vm));
 
 	for (md = io_desc; nr; md++, nr--) {
-		create_mapping(md, false);
+		create_mapping(md);
 		vm->addr = (void *)(md->virtual & PAGE_MASK);
 		vm->size = PAGE_ALIGN(md->length + (md->virtual & ~PAGE_MASK));
 		vm->phys_addr = __pfn_to_phys(md->pfn); 
@@ -1199,12 +1175,12 @@
 	map.virtual = 0xffff0000;
 	map.length = PAGE_SIZE;
 	map.type = MT_HIGH_VECTORS;
-	create_mapping(&map, false);
+	create_mapping(&map);
 
 	if (!vectors_high()) {
 		map.virtual = 0;
 		map.type = MT_LOW_VECTORS;
-		create_mapping(&map, false);
+		create_mapping(&map);
 	}
 
 	/*
@@ -1224,7 +1200,7 @@
 			map.virtual = CONFIG_ARM_USER_ACCESSIBLE_TIMER_BASE;
 			map.length = PAGE_SIZE;
 			map.type = MT_DEVICE_USER_ACCESSIBLE;
-			create_mapping(&map, false);
+			create_mapping(&map);
 		}
 	}
 
@@ -1241,7 +1217,7 @@
 static void __init kmap_init(void)
 {
 #ifdef CONFIG_HIGHMEM
-	pkmap_page_table = early_pte_alloc_and_install(pmd_off_k(PKMAP_BASE),
+	pkmap_page_table = early_pte_alloc(pmd_off_k(PKMAP_BASE),
 		PKMAP_BASE, _PAGE_KERNEL_TABLE);
 #endif
 }
@@ -1349,14 +1325,12 @@
 static void __init map_lowmem(void)
 {
 	struct memblock_region *reg;
-	phys_addr_t start;
-	phys_addr_t end;
-	struct map_desc map;
 
 	/* Map all the lowmem memory banks. */
 	for_each_memblock(memory, reg) {
-		start = reg->base;
-		end = start + reg->size;
+		phys_addr_t start = reg->base;
+		phys_addr_t end = start + reg->size;
+		struct map_desc map;
 
 		if (end > arm_lowmem_limit)
 			end = arm_lowmem_limit;
@@ -1370,28 +1344,28 @@
 			map.length = SECTION_SIZE;
 			map.type = MT_MEMORY;
 
-			create_mapping(&map, false);
+			create_mapping(&map);
 
 			map.pfn = __phys_to_pfn(start + SECTION_SIZE);
 			map.virtual = __phys_to_virt(start + SECTION_SIZE);
 			map.length = (unsigned long)RX_AREA_END - map.virtual;
 			map.type = MT_MEMORY_RX;
 
-			create_mapping(&map, false);
+			create_mapping(&map);
 
 			map.pfn = __phys_to_pfn(__pa(__start_rodata));
 			map.virtual = (unsigned long)__start_rodata;
 			map.length = __init_begin - __start_rodata;
 			map.type = MT_MEMORY_R;
 
-			create_mapping(&map, false);
+			create_mapping(&map);
 
 			map.pfn = __phys_to_pfn(__pa(__init_begin));
 			map.virtual = (unsigned long)__init_begin;
 			map.length = __init_data - __init_begin;
 			map.type = MT_MEMORY;
 
-			create_mapping(&map, false);
+			create_mapping(&map);
 
 			map.pfn = __phys_to_pfn(__pa(__init_data));
 			map.virtual = (unsigned long)__init_data;
@@ -1406,20 +1380,8 @@
 		map.type = MT_MEMORY;
 #endif
 
-		create_mapping(&map, false);
+		create_mapping(&map);
 	}
-
-#ifdef CONFIG_DEBUG_RODATA
-	start = __pa(_stext) & PMD_MASK;
-	end = ALIGN(__pa(__end_rodata), PMD_SIZE);
-
-	map.pfn = __phys_to_pfn(start);
-	map.virtual = __phys_to_virt(start);
-	map.length = end - start;
-	map.type = MT_MEMORY;
-
-	create_mapping(&map, true);
-#endif
 }
 
 /*
diff --git a/arch/arm/mm/rodata.c b/arch/arm/mm/rodata.c
deleted file mode 100644
index 9a8eb84..0000000
--- a/arch/arm/mm/rodata.c
+++ /dev/null
@@ -1,159 +0,0 @@
-/*
- *  linux/arch/arm/mm/rodata.c
- *
- *  Copyright (C) 2011 Google, Inc.
- *
- *  Author: Colin Cross <ccross@android.com>
- *
- *  Based on x86 implementation in arch/x86/mm/init_32.c
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#include <linux/kernel.h>
-#include <linux/mm.h>
-#include <linux/module.h>
-
-#include <asm/cache.h>
-#include <asm/pgtable.h>
-#include <asm/rodata.h>
-#include <asm/sections.h>
-#include <asm/tlbflush.h>
-
-#include "mm.h"
-
-static int kernel_set_to_readonly __read_mostly;
-
-#ifdef CONFIG_DEBUG_RODATA_TEST
-static const int rodata_test_data = 0xC3;
-
-static noinline void rodata_test(void)
-{
-	int result;
-
-	pr_info("%s: attempting to write to read-only section:\n", __func__);
-
-	if (*(volatile int *)&rodata_test_data != 0xC3) {
-		pr_err("read only data changed before test\n");
-		return;
-	}
-
-	/*
-	 * Attempt to to write to rodata_test_data, trapping the expected
-	 * data abort.  If the trap executed, result will be 1.  If it didn't,
-	 * result will be 0xFF.
-	 */
-	asm volatile(
-		"0:	str	%[zero], [%[rodata_test_data]]\n"
-		"	mov	%[result], #0xFF\n"
-		"	b	2f\n"
-		"1:	mov	%[result], #1\n"
-		"2:\n"
-
-		/* Exception fixup - if store at label 0 faults, jumps to 1 */
-		".pushsection __ex_table, \"a\"\n"
-		"	.long	0b, 1b\n"
-		".popsection\n"
-
-		: [result] "=r" (result)
-		: [rodata_test_data] "r" (&rodata_test_data), [zero] "r" (0)
-		: "memory"
-	);
-
-	if (result == 1)
-		pr_info("write to read-only section trapped, success\n");
-	else
-		pr_err("write to read-only section NOT trapped, test failed\n");
-
-	if (*(volatile int *)&rodata_test_data != 0xC3)
-		pr_err("read only data changed during write\n");
-}
-#else
-static inline void rodata_test(void) { }
-#endif
-
-static int set_page_attributes(unsigned long virt, int numpages,
-	pte_t (*f)(pte_t))
-{
-	pmd_t *pmd;
-	pte_t *pte;
-	unsigned long start = virt;
-	unsigned long end = virt + (numpages << PAGE_SHIFT);
-	unsigned long pmd_end;
-
-	while (virt < end) {
-		pmd = pmd_off_k(virt);
-		pmd_end = min(ALIGN(virt + 1, PMD_SIZE), end);
-
-		if ((pmd_val(*pmd) & PMD_TYPE_MASK) != PMD_TYPE_TABLE) {
-			pr_err("%s: pmd %p=%08lx for %08lx not page table\n",
-				__func__, pmd, pmd_val(*pmd), virt);
-			virt = pmd_end;
-			continue;
-		}
-
-		while (virt < pmd_end) {
-			pte = pte_offset_kernel(pmd, virt);
-			set_pte_ext(pte, f(*pte), 0);
-			virt += PAGE_SIZE;
-		}
-	}
-
-	flush_tlb_kernel_range(start, end);
-
-	return 0;
-}
-
-int set_memory_ro(unsigned long virt, int numpages)
-{
-	return set_page_attributes(virt, numpages, pte_wrprotect);
-}
-EXPORT_SYMBOL(set_memory_ro);
-
-int set_memory_rw(unsigned long virt, int numpages)
-{
-	return set_page_attributes(virt, numpages, pte_mkwrite);
-}
-EXPORT_SYMBOL(set_memory_rw);
-
-void set_kernel_text_rw(void)
-{
-	unsigned long start = PAGE_ALIGN((unsigned long)_text);
-	unsigned long size = PAGE_ALIGN((unsigned long)__end_rodata) - start;
-
-	if (!kernel_set_to_readonly)
-		return;
-
-	pr_debug("Set kernel text: %lx - %lx to read-write\n",
-		 start, start + size);
-
-	set_memory_rw(start, size >> PAGE_SHIFT);
-}
-
-void set_kernel_text_ro(void)
-{
-	unsigned long start = PAGE_ALIGN((unsigned long)_text);
-	unsigned long size = PAGE_ALIGN((unsigned long)__end_rodata) - start;
-
-	if (!kernel_set_to_readonly)
-		return;
-
-	pr_info_once("Write protecting the kernel text section %lx - %lx\n",
-		start, start + size);
-
-	pr_debug("Set kernel text: %lx - %lx to read only\n",
-		 start, start + size);
-
-	set_memory_ro(start, size >> PAGE_SHIFT);
-}
-
-void mark_rodata_ro(void)
-{
-	kernel_set_to_readonly = 1;
-
-	set_kernel_text_ro();
-
-	rodata_test();
-}
diff --git a/drivers/Kconfig b/drivers/Kconfig
index a73d713..adead10 100644
--- a/drivers/Kconfig
+++ b/drivers/Kconfig
@@ -150,4 +150,6 @@
 
 source "drivers/coresight/Kconfig"
 
+source "drivers/bif/Kconfig"
+
 endmenu
diff --git a/drivers/Makefile b/drivers/Makefile
index f461e83..d55b035 100644
--- a/drivers/Makefile
+++ b/drivers/Makefile
@@ -142,3 +142,5 @@
 obj-$(CONFIG_MOBICORE_SUPPORT)  += gud/
 
 obj-$(CONFIG_CORESIGHT)		+= coresight/
+
+obj-$(CONFIG_BIF)		+= bif/
diff --git a/drivers/base/dma-contiguous.c b/drivers/base/dma-contiguous.c
index 81409b0..3a8bbc5 100644
--- a/drivers/base/dma-contiguous.c
+++ b/drivers/base/dma-contiguous.c
@@ -9,6 +9,9 @@
  * modify it under the terms of the GNU General Public License as
  * published by the Free Software Foundation; either version 2 of the
  * License or (at your optional) any later version of the license.
+ *
+ * The Linux Foundation chooses to take subject only to the GPLv2 license
+ * terms, and distributes only under these terms.
  */
 
 #define pr_fmt(fmt) "cma: " fmt
@@ -24,6 +27,9 @@
 
 #include <linux/memblock.h>
 #include <linux/err.h>
+#include <linux/of.h>
+#include <linux/of_fdt.h>
+#include <linux/of_platform.h>
 #include <linux/mm.h>
 #include <linux/mutex.h>
 #include <linux/page-isolation.h>
@@ -43,7 +49,48 @@
 	unsigned long	*bitmap;
 };
 
-struct cma *dma_contiguous_default_area;
+static DEFINE_MUTEX(cma_mutex);
+
+struct cma *dma_contiguous_def_area;
+phys_addr_t dma_contiguous_def_base;
+
+static struct cma_area {
+	phys_addr_t base;
+	unsigned long size;
+	struct cma *cma;
+	const char *name;
+} cma_areas[MAX_CMA_AREAS];
+static unsigned cma_area_count;
+
+
+static struct cma_map {
+	phys_addr_t base;
+	struct device *dev;
+} cma_maps[MAX_CMA_AREAS] __initdata;
+static unsigned cma_map_count __initdata;
+
+static struct cma *cma_get_area(phys_addr_t base)
+{
+	int i;
+	for (i = 0; i < cma_area_count; i++)
+		if (cma_areas[i].base == base)
+			return cma_areas[i].cma;
+	return NULL;
+}
+
+static struct cma *cma_get_area_by_name(const char *name)
+{
+	int i;
+	if (!name)
+		return NULL;
+
+	for (i = 0; i < cma_area_count; i++)
+		if (cma_areas[i].name && strcmp(cma_areas[i].name, name) == 0)
+			return cma_areas[i].cma;
+	return NULL;
+}
+
+
 
 #ifdef CONFIG_CMA_SIZE_MBYTES
 #define CMA_SIZE_MBYTES CONFIG_CMA_SIZE_MBYTES
@@ -61,8 +108,8 @@
  * Users, who want to set the size of global CMA area for their system
  * should use cma= kernel parameter.
  */
-static const unsigned long size_bytes = CMA_SIZE_MBYTES * SZ_1M;
-static long size_cmdline = -1;
+static const phys_addr_t size_bytes = CMA_SIZE_MBYTES * SZ_1M;
+static phys_addr_t size_cmdline = -1;
 
 static int __init early_cma(char *p)
 {
@@ -74,7 +121,7 @@
 
 #ifdef CONFIG_CMA_SIZE_PERCENTAGE
 
-static unsigned long __init __maybe_unused cma_early_percent_memory(void)
+static phys_addr_t __init __maybe_unused cma_early_percent_memory(void)
 {
 	struct memblock_region *reg;
 	unsigned long total_pages = 0;
@@ -92,52 +139,13 @@
 
 #else
 
-static inline __maybe_unused unsigned long cma_early_percent_memory(void)
+static inline __maybe_unused phys_addr_t cma_early_percent_memory(void)
 {
 	return 0;
 }
 
 #endif
 
-/**
- * dma_contiguous_reserve() - reserve area for contiguous memory handling
- * @limit: End address of the reserved memory (optional, 0 for any).
- *
- * This function reserves memory from early allocator. It should be
- * called by arch specific code once the early allocator (memblock or bootmem)
- * has been activated and all other subsystems have already allocated/reserved
- * memory.
- */
-void __init dma_contiguous_reserve(phys_addr_t limit)
-{
-	unsigned long selected_size = 0;
-
-	pr_debug("%s(limit %08lx)\n", __func__, (unsigned long)limit);
-
-	if (size_cmdline != -1) {
-		selected_size = size_cmdline;
-	} else {
-#ifdef CONFIG_CMA_SIZE_SEL_MBYTES
-		selected_size = size_bytes;
-#elif defined(CONFIG_CMA_SIZE_SEL_PERCENTAGE)
-		selected_size = cma_early_percent_memory();
-#elif defined(CONFIG_CMA_SIZE_SEL_MIN)
-		selected_size = min(size_bytes, cma_early_percent_memory());
-#elif defined(CONFIG_CMA_SIZE_SEL_MAX)
-		selected_size = max(size_bytes, cma_early_percent_memory());
-#endif
-	}
-
-	if (selected_size) {
-		pr_debug("%s: reserving %ld MiB for global area\n", __func__,
-			 selected_size / SZ_1M);
-
-		dma_declare_contiguous(NULL, selected_size, 0, limit);
-	}
-};
-
-static DEFINE_MUTEX(cma_mutex);
-
 static __init int cma_activate_area(unsigned long base_pfn, unsigned long count)
 {
 	unsigned long pfn = base_pfn;
@@ -194,55 +202,109 @@
 	return ERR_PTR(ret);
 }
 
-static struct cma_reserved {
-	phys_addr_t start;
-	unsigned long size;
-	struct device *dev;
-} cma_reserved[MAX_CMA_AREAS] __initdata;
-static unsigned cma_reserved_count __initdata;
+/*****************************************************************************/
 
-static int __init cma_init_reserved_areas(void)
+#ifdef CONFIG_OF
+int __init cma_fdt_scan(unsigned long node, const char *uname,
+				int depth, void *data)
 {
-	struct cma_reserved *r = cma_reserved;
-	unsigned i = cma_reserved_count;
+	phys_addr_t base, size;
+	unsigned long len;
+	__be32 *prop;
+	char *name;
 
-	pr_debug("%s()\n", __func__);
+	if (strncmp(uname, "region@", 7) != 0 || depth != 2 ||
+	    !of_get_flat_dt_prop(node, "linux,contiguous-region", NULL))
+		return 0;
 
-	for (; i; --i, ++r) {
-		struct cma *cma;
-		cma = cma_create_area(PFN_DOWN(r->start),
-				      r->size >> PAGE_SHIFT);
-		if (!IS_ERR(cma))
-			dev_set_cma_area(r->dev, cma);
-	}
+	prop = of_get_flat_dt_prop(node, "reg", &len);
+	if (!prop || (len != 2 * sizeof(unsigned long)))
+		return 0;
+
+	base = be32_to_cpu(prop[0]);
+	size = be32_to_cpu(prop[1]);
+
+	name = of_get_flat_dt_prop(node, "label", NULL);
+
+	pr_info("Found %s, memory base %lx, size %ld MiB\n", uname,
+		(unsigned long)base, (unsigned long)size / SZ_1M);
+	dma_contiguous_reserve_area(size, &base, 0, name);
+
 	return 0;
 }
-core_initcall(cma_init_reserved_areas);
+#endif
 
 /**
- * dma_declare_contiguous() - reserve area for contiguous memory handling
- *			      for particular device
- * @dev:   Pointer to device structure.
- * @size:  Size of the reserved memory.
- * @base:  Start address of the reserved memory (optional, 0 for any).
+ * dma_contiguous_reserve() - reserve area for contiguous memory handling
  * @limit: End address of the reserved memory (optional, 0 for any).
  *
- * This function reserves memory for specified device. It should be
- * called by board specific code when early allocator (memblock or bootmem)
- * is still activate.
+ * This function reserves memory from early allocator. It should be
+ * called by arch specific code once the early allocator (memblock or bootmem)
+ * has been activated and all other subsystems have already allocated/reserved
+ * memory. It reserves contiguous areas for global, device independent
+ * allocations and (optionally) all areas defined in device tree structures.
  */
-int __init dma_declare_contiguous(struct device *dev, unsigned long size,
-				  phys_addr_t base, phys_addr_t limit)
+void __init dma_contiguous_reserve(phys_addr_t limit)
 {
-	struct cma_reserved *r = &cma_reserved[cma_reserved_count];
-	unsigned long alignment;
+	phys_addr_t sel_size = 0;
+
+	pr_debug("%s(limit %08lx)\n", __func__, (unsigned long)limit);
+
+	if (size_cmdline != -1) {
+		sel_size = size_cmdline;
+	} else {
+#ifdef CONFIG_CMA_SIZE_SEL_MBYTES
+		sel_size = size_bytes;
+#elif defined(CONFIG_CMA_SIZE_SEL_PERCENTAGE)
+		sel_size = cma_early_percent_memory();
+#elif defined(CONFIG_CMA_SIZE_SEL_MIN)
+		sel_size = min(size_bytes, cma_early_percent_memory());
+#elif defined(CONFIG_CMA_SIZE_SEL_MAX)
+		sel_size = max(size_bytes, cma_early_percent_memory());
+#endif
+	}
+
+	if (sel_size) {
+		phys_addr_t base = 0;
+		pr_debug("%s: reserving %ld MiB for global area\n", __func__,
+			 (unsigned long)sel_size / SZ_1M);
+
+		if (dma_contiguous_reserve_area(sel_size, &base, limit, NULL)
+		    == 0)
+			dma_contiguous_def_base = base;
+	}
+#ifdef CONFIG_OF
+	of_scan_flat_dt(cma_fdt_scan, NULL);
+#endif
+};
+
+/**
+ * dma_contiguous_reserve_area() - reserve custom contiguous area
+ * @size: Size of the reserved area (in bytes),
+ * @base: Pointer to the base address of the reserved area, also used to return
+ * 	  base address of the actually reserved area, optional, use pointer to
+ *	  0 for any
+ * @limit: End address of the reserved memory (optional, 0 for any).
+ *
+ * This function reserves memory from early allocator. It should be
+ * called by arch specific code once the early allocator (memblock or bootmem)
+ * has been activated and all other subsystems have already allocated/reserved
+ * memory. This function allows the creation of custom reserved areas for
+ * specific devices.
+ */
+int __init dma_contiguous_reserve_area(phys_addr_t size, phys_addr_t *res_base,
+				       phys_addr_t limit, const char *name)
+{
+	phys_addr_t base = *res_base;
+	phys_addr_t alignment;
+	int ret = 0;
 
 	pr_debug("%s(size %lx, base %08lx, limit %08lx)\n", __func__,
 		 (unsigned long)size, (unsigned long)base,
 		 (unsigned long)limit);
 
 	/* Sanity checks */
-	if (cma_reserved_count == ARRAY_SIZE(cma_reserved)) {
+	if (cma_area_count == ARRAY_SIZE(cma_areas)) {
 		pr_err("Not enough slots for CMA reserved regions!\n");
 		return -ENOSPC;
 	}
@@ -251,7 +313,7 @@
 		return -EINVAL;
 
 	/* Sanitise input arguments */
-	alignment = PAGE_SIZE << max(MAX_ORDER, pageblock_order);
+	alignment = PAGE_SIZE << max(MAX_ORDER - 1, pageblock_order);
 	base = ALIGN(base, alignment);
 	size = ALIGN(size, alignment);
 	limit &= ~(alignment - 1);
@@ -260,7 +322,7 @@
 	if (base) {
 		if (memblock_is_region_reserved(base, size) ||
 		    memblock_reserve(base, size) < 0) {
-			base = -EBUSY;
+			ret = -EBUSY;
 			goto err;
 		}
 	} else {
@@ -270,11 +332,7 @@
 		 */
 		phys_addr_t addr = __memblock_alloc_base(size, alignment, limit);
 		if (!addr) {
-			base = -ENOMEM;
-			goto err;
-		} else if (addr + size > ~(unsigned long)0) {
-			memblock_free(addr, size);
-			base = -EINVAL;
+			ret = -ENOMEM;
 			goto err;
 		} else {
 			base = addr;
@@ -285,22 +343,112 @@
 	 * Each reserved area must be initialised later, when more kernel
 	 * subsystems (like slab allocator) are available.
 	 */
-	r->start = base;
-	r->size = size;
-	r->dev = dev;
-	cma_reserved_count++;
-	pr_info("CMA: reserved %ld MiB at %08lx\n", size / SZ_1M,
+	cma_areas[cma_area_count].base = base;
+	cma_areas[cma_area_count].size = size;
+	cma_areas[cma_area_count].name = name;
+	cma_area_count++;
+	*res_base = base;
+
+	pr_info("CMA: reserved %ld MiB at %08lx\n", (unsigned long)size / SZ_1M,
 		(unsigned long)base);
 
 	/* Architecture specific contiguous memory fixup. */
 	dma_contiguous_early_fixup(base, size);
 	return 0;
 err:
-	pr_err("CMA: failed to reserve %ld MiB\n", size / SZ_1M);
-	return base;
+	pr_err("CMA: failed to reserve %ld MiB\n", (unsigned long)size / SZ_1M);
+	return ret;
 }
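
Example (not part of the diff): reserving a named area from board/arch init code with the
API above; the size, label, and function names are made up.

#include <linux/dma-contiguous.h>
#include <linux/init.h>

static phys_addr_t example_cma_base;

void __init example_reserve_cma(void)
{
	phys_addr_t base = 0;	/* 0 lets CMA choose any suitable base */

	if (!dma_contiguous_reserve_area(16 * SZ_1M, &base, 0, "example-region"))
		example_cma_base = base;
}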
 
 /**
+ * dma_contiguous_add_device() - add device to custom contiguous reserved area
+ * @dev:   Pointer to device structure.
+ * @base:  Base address of the reserved area returned by the
+ *         dma_contiguous_reserve_area() function
+ *
+ * This function assigns the given device to the contiguous memory area
+ * reserved earlier by dma_contiguous_reserve_area() function.
+ */
+int __init dma_contiguous_add_device(struct device *dev, phys_addr_t base)
+{
+	if (cma_map_count == ARRAY_SIZE(cma_maps)) {
+		pr_err("Not enough slots for CMA reserved regions!\n");
+		return -ENOSPC;
+	}
+	cma_maps[cma_map_count].dev = dev;
+	cma_maps[cma_map_count].base = base;
+	cma_map_count++;
+	return 0;
+}
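
Example (not part of the diff): binding a device to the area reserved in the previous
sketch, still at early init time; names are carried over from that sketch.

int __init example_bind_cma(struct device *dev)
{
	if (!example_cma_base)
		return -ENODEV;

	return dma_contiguous_add_device(dev, example_cma_base);
}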
+
+#ifdef CONFIG_OF
+static void cma_assign_device_from_dt(struct device *dev)
+{
+	struct device_node *node;
+	struct cma *cma;
+	const char *name;
+	u32 value;
+
+	node = of_parse_phandle(dev->of_node, "linux,contiguous-region", 0);
+	if (!node)
+		return;
+	if (of_property_read_u32(node, "reg", &value) && !value)
+		return;
+
+	if (of_property_read_string(node, "label", &name))
+		return;
+
+	cma = cma_get_area_by_name(name);
+	if (!cma)
+		return;
+
+	dev_set_cma_area(dev, cma);
+	pr_info("Assigned CMA region at %lx to %s device\n", (unsigned long)value, dev_name(dev));
+}
+
+static int cma_device_init_notifier_call(struct notifier_block *nb,
+					 unsigned long event, void *data)
+{
+	struct device *dev = data;
+	if (event == BUS_NOTIFY_ADD_DEVICE && dev->of_node)
+		cma_assign_device_from_dt(dev);
+	return NOTIFY_DONE;
+}
+
+static struct notifier_block cma_dev_init_nb = {
+	.notifier_call = cma_device_init_notifier_call,
+};
+#endif
+
+static int __init cma_init_reserved_areas(void)
+{
+	struct cma *cma;
+	int i;
+
+	for (i = 0; i < cma_area_count; i++) {
+		phys_addr_t base = PFN_DOWN(cma_areas[i].base);
+		unsigned int count = cma_areas[i].size >> PAGE_SHIFT;
+
+		cma = cma_create_area(base, count);
+		if (!IS_ERR(cma))
+			cma_areas[i].cma = cma;
+	}
+
+	dma_contiguous_def_area = cma_get_area(dma_contiguous_def_base);
+
+	for (i = 0; i < cma_map_count; i++) {
+		cma = cma_get_area(cma_maps[i].base);
+		dev_set_cma_area(cma_maps[i].dev, cma);
+	}
+
+#ifdef CONFIG_OF
+	bus_register_notifier(&platform_bus_type, &cma_dev_init_nb);
+#endif
+	return 0;
+}
+core_initcall(cma_init_reserved_areas);
+
+/**
  * dma_alloc_from_contiguous() - allocate pages from contiguous area
  * @dev:   Pointer to device for which the allocation is performed.
  * @count: Requested number of pages.
@@ -316,6 +464,7 @@
 {
 	unsigned long mask, pfn, pageno, start = 0;
 	struct cma *cma = dev_get_cma_area(dev);
+	struct page *page = NULL;
 	int ret;
 	int tries = 0;
 
@@ -338,18 +487,17 @@
 	for (;;) {
 		pageno = bitmap_find_next_zero_area(cma->bitmap, cma->count,
 						    start, count, mask);
-		if (pageno >= cma->count) {
-			ret = -ENOMEM;
-			goto error;
-		}
+		if (pageno >= cma->count)
+			break;
 
 		pfn = cma->base_pfn + pageno;
 		ret = alloc_contig_range(pfn, pfn + count, MIGRATE_CMA);
 		if (ret == 0) {
 			bitmap_set(cma->bitmap, pageno, count);
+			page = pfn_to_page(pfn);
 			break;
 		} else if (ret != -EBUSY) {
-			goto error;
+			break;
 		}
 		tries++;
 		trace_dma_alloc_contiguous_retry(tries);
@@ -361,12 +509,8 @@
 	}
 
 	mutex_unlock(&cma_mutex);
-
-	pr_debug("%s(): returned %p\n", __func__, pfn_to_page(pfn));
-	return pfn_to_page(pfn);
-error:
-	mutex_unlock(&cma_mutex);
-	return NULL;
+	pr_debug("%s(): returned %p\n", __func__, page);
+	return page;
 }
 
 /**
diff --git a/drivers/base/sync.c b/drivers/base/sync.c
index a97a503..2e35996 100644
--- a/drivers/base/sync.c
+++ b/drivers/base/sync.c
@@ -616,9 +616,11 @@
 	}
 
 	if (fence->status == 0) {
-		pr_info("fence timeout on [%p] after %dms\n", fence,
-			jiffies_to_msecs(timeout));
-		sync_dump();
+		if (timeout > 0) {
+			pr_info("fence timeout on [%p] after %dms\n", fence,
+				jiffies_to_msecs(timeout));
+			sync_dump();
+		}
 		return -ETIME;
 	}
 
diff --git a/drivers/bif/Kconfig b/drivers/bif/Kconfig
new file mode 100644
index 0000000..502b92b
--- /dev/null
+++ b/drivers/bif/Kconfig
@@ -0,0 +1,12 @@
+#
+# BIF framework and drivers
+#
+menuconfig BIF
+	bool "MIPI-BIF support"
+	select CRC_CCITT
+	select BITREVERSE
+	help
+	  MIPI-BIF (battery interface) is a one-wire serial interface between a
+	  host master device and one or more slave devices which are located in
+	  a battery pack or on the host itself.  Enabling this option allows
+	  BIF consumer drivers to issue transactions via BIF controller drivers.
diff --git a/drivers/bif/Makefile b/drivers/bif/Makefile
new file mode 100644
index 0000000..02528c1
--- /dev/null
+++ b/drivers/bif/Makefile
@@ -0,0 +1,4 @@
+#
+# Makefile for kernel BIF framework.
+#
+obj-$(CONFIG_BIF)			+= bif-core.o
diff --git a/drivers/bif/bif-core.c b/drivers/bif/bif-core.c
new file mode 100644
index 0000000..e11e6ba4
--- /dev/null
+++ b/drivers/bif/bif-core.c
@@ -0,0 +1,2934 @@
+/* Copyright (c) 2013, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "%s: " fmt, __func__
+
+#include <linux/bitrev.h>
+#include <linux/crc-ccitt.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/of.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/workqueue.h>
+#include <linux/bif/consumer.h>
+#include <linux/bif/driver.h>
+
+/**
+ * struct bif_ctrl_dev - holds controller device specific information
+ * @list:			Doubly-linked list parameter linking to other
+ *				BIF controllers registered in the system
+ * @desc:			Description structure for this BIF controller
+ * @mutex:			Mutex lock that is used to ensure mutual
+ *				exclusion between transactions performed on the
+ *				BIF bus for this controller
+ * @ctrl_dev:			Device pointer to the BIF controller device
+ * @driver_data:		Private data used by the BIF controller
+ * @selected_sdev:		Slave device that is currently selected on
+ *				the BIF bus of this controller
+ * @bus_change_notifier:	Head of a notifier list containing notifier
+ *				blocks that are notified when the battery
+ *				presence changes
+ * @enter_irq_mode_work:	Work task that is scheduled after a transaction
+ *				completes when there are consumers that are
+ *				actively monitoring BIF slave interrupts
+ * @irq_count:			This is a count of the total number of BIF slave
+ *				interrupts that are currently being monitored
+ *				for the BIF slaves connected to this BIF
+ *				controller
+ * @irq_mode_delay_jiffies:	Number of jiffies to wait before scheduling the
+ *				enter IRQ mode task.  Using a larger value
+ *				helps to improve the performance of BIF
+ *				consumers that perform many BIF transactions.
+ *				Using a smaller value reduces the latency of
+ *				BIF slave interrupts.
+ * @battery_present:		Cached value of the battery presence.  This is
+ *				used to filter out spurious presence update
+ *				calls when the battery presence state has not
+ *				changed.
+ */
+struct bif_ctrl_dev {
+	struct list_head		list;
+	struct bif_ctrl_desc		*desc;
+	struct mutex			mutex;
+	struct device			*ctrl_dev;
+	void				*driver_data;
+	struct bif_slave_dev		*selected_sdev;
+	struct blocking_notifier_head	bus_change_notifier;
+	struct delayed_work		enter_irq_mode_work;
+	int				irq_count;
+	int				irq_mode_delay_jiffies;
+	bool				battery_present;
+};
+
+/**
+ * struct bif_ctrl - handle used by BIF consumers for bus oriented BIF
+ *			operations
+ * @bdev:		Pointer to BIF controller device
+ * @exclusive_lock:	Flag which indicates that the BIF consumer responsible
+ *			for this handle has locked the BIF bus of this
+ *			controller.  BIF transactions from other consumers are
+ *			blocked until the bus is unlocked.
+ */
+struct bif_ctrl {
+	struct bif_ctrl_dev	*bdev;
+	bool			exclusive_lock;
+};
+
+/**
+ * struct bif_slave_dev - holds BIF slave device information
+ * @list:			Doubly-linked list parameter linking to other
+ *				BIF slaves that have been enumerated
+ * @bdev:			Pointer to the BIF controller device that this
+ *				slave is physically connected to
+ * @slave_addr:			8-bit BIF DEV_ADR assigned to this slave
+ * @unique_id:			80-bit BIF unique ID of the slave
+ * @unique_id_bits_known:	Number of bits of the UID that are currently
+ *				known.  This number starts at 0, is incremented
+ *				during a UID search, and must end at 80 if the
+ *				slave responds to the search properly.
+ * @present:			Boolean value showing if this slave is
+ *				physically present in the system at a given
+ *				point in time.  The value is set to false if the
+ *				battery pack containing the slave is
+ *				disconnected.
+ * @l1_data:			BIF DDB L1 data of the slave as read from the
+ *				slave's memory
+ * @function_directory:		Pointer to the BIF DDB L2 function directory
+ *				list as read from the slave's memory
+ * @protocol_function:		Pointer to constant protocol function data as
+ *				well as software state information if the slave
+ *				has a protocol function
+ * @slave_ctrl_function:	Pointer to constant slave control function data
+ *				as well as software state information if the
+ *				slave has a slave control function
+ * @nvm_function:		Pointer to constant non-volatile memory function
+ *				data as well as software state information if
+ *				the slave has a non-volatile memory function
+ *
+ * bif_slave_dev objects are stored indefinitely after enumeration in order to
+ * speed up battery reinsertion.  Only a UID check is needed after inserting a
+ * battery assuming it has been enumerated before.
+ *
+ * unique_id bytes are stored such that unique_id[0] = MSB and
+ * unique_id[BIF_UNIQUE_ID_BYTE_LENGTH - 1] = LSB
+ */
+struct bif_slave_dev {
+	struct list_head			list;
+	struct bif_ctrl_dev			*bdev;
+	u8					slave_addr;
+	u8				unique_id[BIF_UNIQUE_ID_BYTE_LENGTH];
+	int					unique_id_bits_known;
+	bool					present;
+	struct bif_ddb_l1_data			l1_data;
+	struct bif_ddb_l2_data			*function_directory;
+	struct bif_protocol_function		*protocol_function;
+	struct bif_slave_control_function	*slave_ctrl_function;
+	struct bif_nvm_function			*nvm_function;
+};
+
+/**
+ * struct bif_slave - handle used by BIF consumers for slave oriented BIF
+ *			operations
+ * @ctrl:		Consumer BIF controller handle data
+ * @sdev:		Pointer to BIF slave device
+ */
+struct bif_slave {
+	struct bif_ctrl				ctrl;
+	struct bif_slave_dev			*sdev;
+};
+
+/* Number of times to retry a full BIF transaction before returning an error. */
+#define BIF_TRANSACTION_RETRY_COUNT	5
+
+static DEFINE_MUTEX(bif_ctrl_list_mutex);
+static LIST_HEAD(bif_ctrl_list);
+static DEFINE_MUTEX(bif_sdev_list_mutex);
+static LIST_HEAD(bif_sdev_list);
+
+static u8 next_dev_addr = 0x02;
+
+#define DEBUG_PRINT_BUFFER_SIZE 256
+static void fill_string(char *str, size_t str_len, u8 *buf, int buf_len)
+{
+	int pos = 0;
+	int i;
+
+	for (i = 0; i < buf_len; i++) {
+		pos += scnprintf(str + pos, str_len - pos, "0x%02X", buf[i]);
+		if (i < buf_len - 1)
+			pos += scnprintf(str + pos, str_len - pos, ", ");
+	}
+}
+
+static void bif_print_slave_data(struct bif_slave_dev *sdev)
+{
+	char str[DEBUG_PRINT_BUFFER_SIZE];
+	u8 *uid;
+	int i, j;
+	struct bif_object *object;
+
+	if (sdev->unique_id_bits_known != BIF_UNIQUE_ID_BIT_LENGTH)
+		return;
+
+	uid = sdev->unique_id;
+	pr_debug("BIF slave: 0x%02X%02X%02X%02X%02X%02X%02X%02X%02X%02X\n",
+		uid[0], uid[1], uid[2], uid[3], uid[4], uid[5], uid[6],
+		uid[7], uid[8], uid[9]);
+	pr_debug("  present=%d, dev_adr=0x%02X\n", sdev->present,
+		sdev->slave_addr);
+	pr_debug("  revision=0x%02X, level=0x%02X, device class=0x%04X\n",
+		sdev->l1_data.revision, sdev->l1_data.level,
+		sdev->l1_data.device_class);
+	pr_debug("  manufacturer ID=0x%04X, product ID=0x%04X\n",
+		sdev->l1_data.manufacturer_id, sdev->l1_data.product_id);
+	pr_debug("  function directory length=%d\n", sdev->l1_data.length);
+
+	for (i = 0; i < sdev->l1_data.length / 4; i++) {
+		pr_debug("  Function %d: type=0x%02X, version=0x%02X, pointer=0x%04X\n",
+			i, sdev->function_directory[i].function_type,
+			sdev->function_directory[i].function_version,
+			sdev->function_directory[i].function_pointer);
+	}
+
+	if (sdev->nvm_function) {
+		pr_debug("  NVM function: pointer=0x%04X, task=%d, wr_buf_size=%d, nvm_base=0x%04X, nvm_size=%d\n",
+			sdev->nvm_function->nvm_pointer,
+			sdev->nvm_function->slave_control_channel,
+			(sdev->nvm_function->write_buffer_size
+				? sdev->nvm_function->write_buffer_size : 0),
+			sdev->nvm_function->nvm_base_address,
+			sdev->nvm_function->nvm_size);
+		if (sdev->nvm_function->object_count)
+			pr_debug("  NVM objects:\n");
+		i = 0;
+		list_for_each_entry(object, &sdev->nvm_function->object_list,
+					list) {
+			pr_debug("    Object %d - addr=0x%04X, data len=%d, type=0x%02X, version=0x%02X, manufacturer ID=0x%04X, crc=0x%04X\n",
+				i, object->addr, object->length - 8,
+				object->type, object->version,
+				object->manufacturer_id, object->crc);
+			for (j = 0; j < DIV_ROUND_UP(object->length - 8, 16);
+					j++) {
+				fill_string(str, DEBUG_PRINT_BUFFER_SIZE,
+					object->data + j * 16,
+					min(16, object->length - 8 - (j * 16)));
+				pr_debug("      data(0x%04X): %s\n", j * 16,
+					str);
+			}
+			i++;
+		}
+	}
+}
+
+static void bif_print_slaves(void)
+{
+	struct bif_slave_dev *sdev;
+
+	mutex_lock(&bif_sdev_list_mutex);
+
+	list_for_each_entry(sdev, &bif_sdev_list, list) {
+		/* Skip slaves without fully known UIDs. */
+		if (sdev->unique_id_bits_known != BIF_UNIQUE_ID_BIT_LENGTH)
+			continue;
+		bif_print_slave_data(sdev);
+	}
+
+	mutex_unlock(&bif_sdev_list_mutex);
+}
+
+static struct bif_slave_dev *bif_add_slave(struct bif_ctrl_dev *bdev)
+{
+	struct bif_slave_dev *sdev;
+
+	sdev = kzalloc(sizeof(struct bif_slave_dev), GFP_KERNEL);
+	if (sdev == NULL) {
+		pr_err("Memory allocation failed for bif_slave_dev\n");
+		return ERR_PTR(-ENOMEM);
+	}
+
+	sdev->bdev = bdev;
+	INIT_LIST_HEAD(&sdev->list);
+	list_add_tail(&sdev->list, &bif_sdev_list);
+
+	return sdev;
+}
+
+static void bif_remove_slave(struct bif_slave_dev *sdev)
+{
+	list_del(&sdev->list);
+	if (sdev->bdev->selected_sdev == sdev)
+		sdev->bdev->selected_sdev = NULL;
+
+	if (sdev->slave_ctrl_function)
+		kfree(sdev->slave_ctrl_function->irq_notifier_list);
+	kfree(sdev->slave_ctrl_function);
+	kfree(sdev->protocol_function);
+	kfree(sdev->function_directory);
+
+	kfree(sdev);
+}
+
+/* This function assumes that the uid array is all 0 to start with. */
+static void set_uid_bit(u8 uid[BIF_UNIQUE_ID_BYTE_LENGTH], unsigned int bit,
+			unsigned int value)
+{
+	u8 mask;
+
+	if (bit >= BIF_UNIQUE_ID_BIT_LENGTH)
+		return;
+
+	mask = 1 << (7 - (bit % 8));
+
+	uid[bit / 8] &= ~mask;
+	uid[bit / 8] |= value << (7 - (bit % 8));
+}
+
+static unsigned int get_uid_bit(u8 uid[BIF_UNIQUE_ID_BYTE_LENGTH],
+			unsigned int bit)
+{
+	if (bit >= BIF_UNIQUE_ID_BIT_LENGTH)
+		return 0;
+
+	return (uid[bit / 8] & (1 << (7 - (bit % 8)))) ? 1 : 0;
+}
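
Example (not part of the diff): the UID bit numbering above is MSB-first, so a few worked
values may help.

/*
 * u8 uid[BIF_UNIQUE_ID_BYTE_LENGTH] = { 0 };
 *
 * set_uid_bit(uid, 0, 1);	-> uid[0] == 0x80  (bit 0 is the MSB of byte 0)
 * set_uid_bit(uid, 7, 1);	-> uid[0] == 0x81
 * set_uid_bit(uid, 79, 1);	-> uid[9] == 0x01  (bit 79 is the LSB of byte 9)
 * get_uid_bit(uid, 7);		-> returns 1
 */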
+
+static void bif_enter_irq_mode_work(struct work_struct *work)
+{
+	struct delayed_work *dwork = to_delayed_work(work);
+	struct bif_ctrl_dev *bdev
+		= container_of(dwork, struct bif_ctrl_dev, enter_irq_mode_work);
+	int rc, i;
+
+	mutex_lock(&bdev->mutex);
+	for (i = 0; i < BIF_TRANSACTION_RETRY_COUNT; i++) {
+		rc = bdev->desc->ops->set_bus_state(bdev,
+					BIF_BUS_STATE_INTERRUPT);
+		if (rc == 0)
+			break;
+	}
+	mutex_unlock(&bdev->mutex);
+
+	/* Reschedule the task if the transaction failed. */
+	if (rc) {
+		pr_err("Could not set BIF bus to interrupt mode, rc=%d\n", rc);
+		schedule_delayed_work(&bdev->enter_irq_mode_work,
+					bdev->irq_mode_delay_jiffies);
+	}
+}
+
+static void bif_cancel_irq_mode_work(struct bif_ctrl_dev *bdev)
+{
+	cancel_delayed_work(&bdev->enter_irq_mode_work);
+}
+
+static void bif_schedule_irq_mode_work(struct bif_ctrl_dev *bdev)
+{
+	if (bdev->irq_count > 0 &&
+	    bdev->desc->ops->get_bus_state(bdev) != BIF_BUS_STATE_INTERRUPT)
+		schedule_delayed_work(&bdev->enter_irq_mode_work,
+					bdev->irq_mode_delay_jiffies);
+}
+
+static int _bif_select_slave_no_retry(struct bif_slave_dev *sdev)
+{
+	struct bif_ctrl_dev *bdev = sdev->bdev;
+	int rc = 0;
+	int i;
+
+	/* Check if the slave is already selected. */
+	if (sdev->bdev->selected_sdev == sdev)
+		return 0;
+
+	if (sdev->slave_addr) {
+		/* Select using DEV_ADR. */
+		rc = bdev->desc->ops->bus_transaction(bdev, BIF_TRANS_SDA,
+							sdev->slave_addr);
+		if (rc)
+			goto out;
+	} else if (sdev->unique_id_bits_known == BIF_UNIQUE_ID_BIT_LENGTH) {
+		/* Select using full UID. */
+		for (i = 0; i < BIF_UNIQUE_ID_BYTE_LENGTH - 1; i++) {
+			rc = bdev->desc->ops->bus_transaction(bdev,
+				BIF_TRANS_EDA, sdev->unique_id[i]);
+			if (rc)
+				goto out;
+		}
+
+		rc = bdev->desc->ops->bus_transaction(bdev, BIF_TRANS_SDA,
+			sdev->unique_id[BIF_UNIQUE_ID_BYTE_LENGTH - 1]);
+		if (rc)
+			goto out;
+	} else {
+		pr_err("Cannot select slave because it has neither UID nor DEV_ADR.\n");
+		return -EINVAL;
+	}
+
+	sdev->bdev->selected_sdev = sdev;
+
+	return 0;
+out:
+	pr_err("bus_transaction failed, rc=%d\n", rc);
+	return rc;
+}
+
+static int bif_select_slave(struct bif_slave_dev *sdev)
+{
+	int rc = -EPERM;
+	int i;
+
+	for (i = 0; i < BIF_TRANSACTION_RETRY_COUNT; i++) {
+		rc = _bif_select_slave_no_retry(sdev);
+		if (rc == 0)
+			break;
+		/* Force slave reselection. */
+		sdev->bdev->selected_sdev = NULL;
+	}
+
+	return rc;
+}
+
+/*
+ * Returns 1 if slave is selected, 0 if slave is not selected, or errno if
+ * error.
+ */
+static int bif_is_slave_selected(struct bif_ctrl_dev *bdev)
+{
+	int rc = -EPERM;
+	int tack, i;
+
+	for (i = 0; i < BIF_TRANSACTION_RETRY_COUNT; i++) {
+		/* Attempt a transaction query. */
+		rc = bdev->desc->ops->bus_transaction_read(bdev, BIF_TRANS_BC,
+						BIF_CMD_TQ, &tack);
+		if (rc == 0 || rc == -ETIMEDOUT)
+			break;
+	}
+
+	if (rc == 0)
+		rc = 1;
+	else if (rc == -ETIMEDOUT)
+		rc = 0;
+	else
+		pr_err("BIF bus_transaction_read failed, rc=%d\n", rc);
+
+	return rc;
+}
+
+/* Read from a specified number of consecutive registers. */
+static int _bif_slave_read_no_retry(struct bif_slave_dev *sdev, u16 addr,
+			u8 *buf, int len)
+{
+	struct bif_ctrl_dev *bdev = sdev->bdev;
+	int rc = 0;
+	int i, response;
+
+	rc = bif_select_slave(sdev);
+	if (rc)
+		return rc;
+
+	if (bdev->desc->ops->read_slave_registers) {
+		/*
+		 * Use low level slave register read implementation in order to
+		 * receive the benefits of BIF burst reads.
+		 */
+		rc = bdev->desc->ops->read_slave_registers(bdev, addr, buf,
+							   len);
+		if (rc)
+			pr_err("read_slave_registers failed, rc=%d\n", rc);
+		return rc;
+	}
+
+	for (i = 0; i < len; i++) {
+		rc = bdev->desc->ops->bus_transaction(bdev, BIF_TRANS_ERA,
+							addr >> 8);
+		if (rc) {
+			pr_err("bus_transaction failed, rc=%d\n", rc);
+			return rc;
+		}
+
+		rc = bdev->desc->ops->bus_transaction_read(bdev, BIF_TRANS_RRA,
+							addr & 0xFF, &response);
+		if (rc) {
+			pr_err("bus_transaction_read failed, rc=%d\n", rc);
+			return rc;
+		}
+
+		if (!(response & BIF_SLAVE_RD_ACK)) {
+			pr_err("BIF register read error=0x%02X\n",
+				response & BIF_SLAVE_RD_ERR);
+			return -EIO;
+		}
+
+		buf[i] = response & BIF_SLAVE_RD_DATA;
+		addr++;
+	}
+
+	return rc;
+}
+
+/*
+ * Read from a specified number of consecutive registers.  Retry the transaction
+ * several times in case of communication failures.
+ */
+static int _bif_slave_read(struct bif_slave_dev *sdev, u16 addr, u8 *buf,
+			int len)
+{
+	int rc = -EPERM;
+	int i;
+
+	for (i = 0; i < BIF_TRANSACTION_RETRY_COUNT; i++) {
+		rc = _bif_slave_read_no_retry(sdev, addr, buf, len);
+		if (rc == 0)
+			break;
+		/* Force slave reselection. */
+		sdev->bdev->selected_sdev = NULL;
+	}
+
+	return rc;
+}
+
+/* Write to a specified number of consecutive registers. */
+static int _bif_slave_write_no_retry(struct bif_slave_dev *sdev, u16 addr,
+			u8 *buf, int len)
+{
+	struct bif_ctrl_dev *bdev = sdev->bdev;
+	int rc = 0;
+	int i;
+
+	rc = bif_select_slave(sdev);
+	if (rc)
+		return rc;
+
+	if (bdev->desc->ops->write_slave_registers) {
+		/*
+		 * Use low level slave register write implementation in order to
+		 * receive the benefits of BIF burst writes.
+		 */
+		rc = bdev->desc->ops->write_slave_registers(bdev, addr, buf,
+							    len);
+		if (rc)
+			pr_err("write_slave_registers failed, rc=%d\n", rc);
+		return rc;
+	}
+
+	rc = bdev->desc->ops->bus_transaction(bdev, BIF_TRANS_ERA, addr >> 8);
+	if (rc)
+		goto out;
+
+	rc = bdev->desc->ops->bus_transaction(bdev, BIF_TRANS_WRA, addr & 0xFF);
+	if (rc)
+		goto out;
+
+	for (i = 0; i < len; i++) {
+		rc = bdev->desc->ops->bus_transaction(bdev, BIF_TRANS_WD,
+							buf[i]);
+		if (rc)
+			goto out;
+	}
+
+	return 0;
+out:
+	pr_err("bus_transaction failed, rc=%d\n", rc);
+	return rc;
+}
+
+/*
+ * Write to a specified number of consecutive registers.  Retry the transaction
+ * several times in case of communication failures.
+ */
+static int _bif_slave_write(struct bif_slave_dev *sdev, u16 addr, u8 *buf,
+			int len)
+{
+	int rc = -EPERM;
+	int i;
+
+	for (i = 0; i < BIF_TRANSACTION_RETRY_COUNT; i++) {
+		rc = _bif_slave_write_no_retry(sdev, addr, buf, len);
+		if (rc == 0)
+			break;
+		/* Force slave reselection. */
+		sdev->bdev->selected_sdev = NULL;
+	}
+
+	return rc;
+}
+
+/* Takes a mutex if this consumer is not an exclusive bus user. */
+static void bif_ctrl_lock(struct bif_ctrl *ctrl)
+{
+	if (!ctrl->exclusive_lock) {
+		mutex_lock(&ctrl->bdev->mutex);
+		bif_cancel_irq_mode_work(ctrl->bdev);
+	}
+}
+
+/* Releases a mutex if this consumer is not an exclusive bus user. */
+static void bif_ctrl_unlock(struct bif_ctrl *ctrl)
+{
+	if (!ctrl->exclusive_lock) {
+		bif_schedule_irq_mode_work(ctrl->bdev);
+		mutex_unlock(&ctrl->bdev->mutex);
+	}
+}
+
+static void bif_slave_ctrl_lock(struct bif_slave *slave)
+{
+	bif_ctrl_lock(&slave->ctrl);
+}
+
+static void bif_slave_ctrl_unlock(struct bif_slave *slave)
+{
+	bif_ctrl_unlock(&slave->ctrl);
+}
+
+static int bif_check_task(struct bif_slave *slave, unsigned int task)
+{
+	if (IS_ERR_OR_NULL(slave)) {
+		pr_err("Invalid slave handle.\n");
+		return -EINVAL;
+	} else if (!slave->sdev->bdev) {
+		pr_err("BIF controller has been removed.\n");
+		return -ENXIO;
+	} else if (!slave->sdev->slave_ctrl_function
+			|| slave->sdev->slave_ctrl_function->task_count == 0) {
+		pr_err("BIF slave does not support slave control.\n");
+		return -ENODEV;
+	} else if (task >= slave->sdev->slave_ctrl_function->task_count) {
+		pr_err("Requested task: %u greater than max: %u for this slave\n",
+			task, slave->sdev->slave_ctrl_function->task_count);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+/**
+ * bif_request_irq() - request a BIF slave IRQ by slave task number
+ * @slave:	BIF slave handle
+ * @task:	BIF task number of the IRQ inside of the slave.  This
+ *		corresponds to the slave control channel specified for a given
+ *		BIF function inside of the slave.
+ * @nb:		Notifier block to call when the IRQ fires
+ *
+ * This function registers a notifier block to call when the BIF slave interrupt
+ * is triggered and also enables the interrupt.  The interrupt is enabled inside
+ * of the BIF slave's slave control function and also the BIF bus is put into
+ * interrupt mode.
+ *
+ * Returns 0 for success or errno if an error occurred.
+ */
+int bif_request_irq(struct bif_slave *slave, unsigned int task,
+			struct notifier_block *nb)
+{
+	int rc;
+	u16 addr;
+	u8 reg, mask;
+
+	rc = bif_check_task(slave, task);
+	if (rc) {
+		pr_err("Invalid slave or task, rc=%d\n", rc);
+		return rc;
+	}
+
+	bif_slave_ctrl_lock(slave);
+
+	rc = blocking_notifier_chain_register(
+		&slave->sdev->slave_ctrl_function->irq_notifier_list[task], nb);
+	if (rc) {
+		pr_err("Notifier registration failed, rc=%d\n", rc);
+		goto done;
+	}
+
+	/* Enable the interrupt within the slave */
+	mask = BIT(task % SLAVE_CTRL_TASKS_PER_SET);
+	addr = SLAVE_CTRL_FUNC_IRQ_EN_ADDR(
+		slave->sdev->slave_ctrl_function->slave_ctrl_pointer, task);
+	if (task / SLAVE_CTRL_TASKS_PER_SET == 0) {
+		/* Set global interrupt enable. */
+		mask |= BIT(0);
+	}
+	rc = _bif_slave_read(slave->sdev, addr, &reg, 1);
+	if (rc) {
+		pr_err("BIF slave register read failed, rc=%d\n", rc);
+		goto notifier_unregister;
+	}
+	reg |= mask;
+	rc = _bif_slave_write(slave->sdev, addr, &reg, 1);
+	if (rc) {
+		pr_err("BIF slave register write failed, rc=%d\n", rc);
+		goto notifier_unregister;
+	}
+
+	/* Set global interrupt enable if task not in set 0. */
+	if (task / SLAVE_CTRL_TASKS_PER_SET != 0) {
+		mask = BIT(0);
+		addr = SLAVE_CTRL_FUNC_IRQ_EN_ADDR(
+		       slave->sdev->slave_ctrl_function->slave_ctrl_pointer, 0);
+		rc = _bif_slave_read(slave->sdev, addr, &reg, 1);
+		if (rc) {
+			pr_err("BIF slave register read failed, rc=%d\n", rc);
+			goto notifier_unregister;
+		}
+		reg |= mask;
+		rc = _bif_slave_write(slave->sdev, addr, &reg, 1);
+		if (rc) {
+			pr_err("BIF slave register write failed, rc=%d\n", rc);
+			goto notifier_unregister;
+		}
+	}
+
+	rc = slave->sdev->bdev->desc->ops->set_bus_state(slave->sdev->bdev,
+		BIF_BUS_STATE_INTERRUPT);
+	if (rc) {
+		pr_err("Could not set BIF bus to interrupt mode, rc=%d\n", rc);
+		goto notifier_unregister;
+	}
+
+	slave->sdev->bdev->irq_count++;
+done:
+	bif_slave_ctrl_unlock(slave);
+
+	return rc;
+
+notifier_unregister:
+	blocking_notifier_chain_unregister(
+		&slave->sdev->slave_ctrl_function->irq_notifier_list[task],
+		nb);
+	bif_slave_ctrl_unlock(slave);
+
+	return rc;
+
+}
+EXPORT_SYMBOL(bif_request_irq);
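
Example (not part of the diff): a minimal consumer-side sketch of bif_request_irq() and
bif_free_irq(). The slave handle and task number are assumed to come from the BIF consumer
API; all names here are hypothetical.

static int example_bif_irq_cb(struct notifier_block *nb, unsigned long action,
				void *data)
{
	pr_info("BIF slave task interrupt fired\n");
	return NOTIFY_OK;
}

static struct notifier_block example_bif_nb = {
	.notifier_call = example_bif_irq_cb,
};

static int example_start_monitoring(struct bif_slave *slave, unsigned int task)
{
	return bif_request_irq(slave, task, &example_bif_nb);
}

static void example_stop_monitoring(struct bif_slave *slave, unsigned int task)
{
	bif_free_irq(slave, task, &example_bif_nb);
}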
+
+/**
+ * bif_free_irq() - free a BIF slave IRQ by slave task number
+ * @slave:	BIF slave handle
+ * @task:	BIF task number of the IRQ inside of the slave.  This
+ *		corresponds to the slave control channel specified for a given
+ *		BIF function inside of the slave.
+ * @nb:		Notifier block previously registered with this interrupt
+ *
+ * This function unregisters a notifier block that was previously registered
+ * with bif_request_irq().
+ *
+ * Returns 0 for success or errno if an error occurred.
+ */
+int bif_free_irq(struct bif_slave *slave, unsigned int task,
+			struct notifier_block *nb)
+{
+	int rc;
+	u16 addr;
+	u8 reg;
+
+	rc = bif_check_task(slave, task);
+	if (rc) {
+		pr_err("Invalid slave or task, rc=%d\n", rc);
+		return rc;
+	}
+
+	bif_slave_ctrl_lock(slave);
+
+	/* Disable the interrupt within the slave */
+	reg = BIT(task % SLAVE_CTRL_TASKS_PER_SET);
+	addr = SLAVE_CTRL_FUNC_IRQ_CLEAR_ADDR(
+		slave->sdev->slave_ctrl_function->slave_ctrl_pointer, task);
+	rc = _bif_slave_write(slave->sdev, addr, &reg, 1);
+	if (rc) {
+		pr_err("BIF slave register write failed, rc=%d\n", rc);
+		goto done;
+	}
+
+	rc = blocking_notifier_chain_unregister(
+		&slave->sdev->slave_ctrl_function->irq_notifier_list[task], nb);
+	if (rc) {
+		pr_err("Notifier unregistration failed, rc=%d\n", rc);
+		goto done;
+	}
+
+	slave->sdev->bdev->irq_count--;
+
+	if (slave->sdev->bdev->irq_count == 0) {
+		bif_cancel_irq_mode_work(slave->sdev->bdev);
+	} else if (slave->sdev->bdev->irq_count < 0) {
+		pr_err("Unbalanced IRQ free.\n");
+		rc = -EINVAL;
+		slave->sdev->bdev->irq_count = 0;
+	}
+done:
+	bif_slave_ctrl_unlock(slave);
+
+	return rc;
+}
+EXPORT_SYMBOL(bif_free_irq);
+
+/**
+ * bif_trigger_task() - trigger a task within a BIF slave
+ * @slave:	BIF slave handle
+ * @task:	BIF task inside of the slave to trigger.  This corresponds to
+ *		the slave control channel specified for a given BIF function
+ *		inside of the slave.
+ *
+ * Returns 0 for success or errno if an error occurred.
+ */
+int bif_trigger_task(struct bif_slave *slave, unsigned int task)
+{
+	int rc;
+	u16 addr;
+	u8 reg;
+
+	rc = bif_check_task(slave, task);
+	if (rc) {
+		pr_err("Invalid slave or task, rc=%d\n", rc);
+		return rc;
+	}
+
+	bif_slave_ctrl_lock(slave);
+
+	/* Trigger the task within the slave. */
+	reg = BIT(task % SLAVE_CTRL_TASKS_PER_SET);
+	addr = SLAVE_CTRL_FUNC_TASK_TRIGGER_ADDR(
+		slave->sdev->slave_ctrl_function->slave_ctrl_pointer, task);
+	rc = _bif_slave_write(slave->sdev, addr, &reg, 1);
+	if (rc) {
+		pr_err("BIF slave register write failed, rc=%d\n", rc);
+		goto done;
+	}
+
+done:
+	bif_slave_ctrl_unlock(slave);
+
+	return rc;
+}
+EXPORT_SYMBOL(bif_trigger_task);
+
+/**
+ * bif_task_is_busy() - checks the state of a BIF slave task
+ * @slave:	BIF slave handle
+ * @task:	BIF task inside of the slave to check.  This corresponds to
+ *		the slave control channel specified for a given BIF function
+ *		inside of the slave.
+ *
+ * Returns 1 if the task is busy, 0 if it is not busy, and errno on error.
+ */
+int bif_task_is_busy(struct bif_slave *slave, unsigned int task)
+{
+	int rc;
+	u16 addr;
+	u8 reg;
+
+	rc = bif_check_task(slave, task);
+	if (rc) {
+		pr_err("Invalid slave or task, rc=%d\n", rc);
+		return rc;
+	}
+
+	bif_slave_ctrl_lock(slave);
+
+	/* Check the task busy state. */
+	addr = SLAVE_CTRL_FUNC_TASK_BUSY_ADDR(
+		slave->sdev->slave_ctrl_function->slave_ctrl_pointer, task);
+	rc = _bif_slave_read(slave->sdev, addr, &reg, 1);
+	if (rc) {
+		pr_err("BIF slave register read failed, rc=%d\n", rc);
+		goto done;
+	}
+
+	rc = (reg & BIT(task % SLAVE_CTRL_TASKS_PER_SET)) ? 1 : 0;
+done:
+	bif_slave_ctrl_unlock(slave);
+
+	return rc;
+}
+EXPORT_SYMBOL(bif_task_is_busy);
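+
+/*
+ * Illustrative consumer usage of bif_trigger_task() and bif_task_is_busy(),
+ * not part of this driver.  The slave handle, the FOO_TASK number, and the
+ * polling interval are assumptions made only for this sketch:
+ *
+ *	rc = bif_trigger_task(slave, FOO_TASK);
+ *	if (rc) {
+ *		pr_err("bif_trigger_task failed, rc=%d\n", rc);
+ *		return rc;
+ *	}
+ *
+ *	do {
+ *		usleep_range(1000, 2000);
+ *		rc = bif_task_is_busy(slave, FOO_TASK);
+ *	} while (rc > 0);
+ *
+ *	if (rc < 0)
+ *		pr_err("bif_task_is_busy failed, rc=%d\n", rc);
+ */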
+
+static int bif_slave_notify_irqs(struct bif_slave_dev *sdev, int set, u8 val)
+{
+	int rc = 0;
+	int i, task;
+
+	for (i = 0; i < SLAVE_CTRL_TASKS_PER_SET; i++) {
+		if (val & (1 << i)) {
+			task = set * SLAVE_CTRL_TASKS_PER_SET + i;
+
+			rc = blocking_notifier_call_chain(
+			    &sdev->slave_ctrl_function->irq_notifier_list[task],
+			    task, sdev->bdev);
+			rc = notifier_to_errno(rc);
+			if (rc)
+				pr_err("Notification failed for task %d\n",
+					task);
+		}
+	}
+
+	return rc;
+}
+
+static int bif_slave_handle_irq(struct bif_slave_dev *sdev)
+{
+	struct bif_ctrl_dev *bdev = sdev->bdev;
+	bool resp = false;
+	int rc = 0;
+	int i;
+	u16 addr;
+	u8 reg;
+
+	mutex_lock(&sdev->bdev->mutex);
+	bif_cancel_irq_mode_work(sdev->bdev);
+
+	rc = bif_select_slave(sdev);
+	if (rc) {
+		pr_err("Could not select slave, rc=%d\n", rc);
+		goto done;
+	}
+
+	/* Check overall slave interrupt status. */
+	rc = bdev->desc->ops->bus_transaction_query(bdev, BIF_TRANS_BC,
+						    BIF_CMD_ISTS, &resp);
+	if (rc) {
+		pr_err("Could not query slave interrupt status, rc=%d\n", rc);
+		goto done;
+	}
+
+	if (resp) {
+		for (i = 0; i < sdev->slave_ctrl_function->task_count
+					/ SLAVE_CTRL_TASKS_PER_SET; i++) {
+			addr = sdev->slave_ctrl_function->slave_ctrl_pointer
+				+ 4 * i + 1;
+			rc = _bif_slave_read(sdev, addr, &reg, 1);
+			if (rc) {
+				pr_err("BIF slave register read failed, rc=%d\n",
+					rc);
+				goto done;
+			}
+
+			/* Ensure that interrupts are pending in the set. */
+			if (reg != 0x00) {
+				/*
+				 * Release mutex before notifying consumers so
+				 * that they can use the bus.
+				 */
+				mutex_unlock(&sdev->bdev->mutex);
+				rc = bif_slave_notify_irqs(sdev, i, reg);
+				if (rc) {
+					pr_err("BIF slave irq notification failed, rc=%d\n",
+						rc);
+					goto notification_failed;
+				}
+				mutex_lock(&sdev->bdev->mutex);
+
+				rc = bif_select_slave(sdev);
+				if (rc) {
+					pr_err("Could not select slave, rc=%d\n",
+						rc);
+					goto done;
+				}
+
+				/* Clear all interrupts in this set. */
+				rc = _bif_slave_write(sdev, addr, &reg, 1);
+				if (rc) {
+					pr_err("BIF slave register write failed, rc=%d\n",
+						rc);
+					goto done;
+				}
+			}
+		}
+	}
+
+done:
+	bif_schedule_irq_mode_work(sdev->bdev);
+	mutex_unlock(&sdev->bdev->mutex);
+notification_failed:
+	if (rc == 0)
+		rc = resp;
+	return rc;
+}
+
+/**
+ * bif_ctrl_notify_slave_irq() - notify the BIF framework that a slave interrupt
+ *				was received by a BIF controller
+ * @bdev:	BIF controller device pointer
+ *
+ * This function should only be called from a BIF controller driver.
+ *
+ * Returns 0 for success or errno if an error occurred.
+ */
+int bif_ctrl_notify_slave_irq(struct bif_ctrl_dev *bdev)
+{
+	struct bif_slave_dev *sdev;
+	int rc = 0, handled = 0;
+
+	if (IS_ERR_OR_NULL(bdev))
+		return -EINVAL;
+
+	mutex_lock(&bif_sdev_list_mutex);
+
+	list_for_each_entry(sdev, &bif_sdev_list, list) {
+		if (sdev->bdev == bdev && sdev->present) {
+			rc = bif_slave_handle_irq(sdev);
+			if (rc < 0) {
+				pr_err("Could not handle BIF slave irq, rc=%d\n",
+					rc);
+				break;
+			}
+			handled += rc;
+		}
+	}
+
+	mutex_unlock(&bif_sdev_list_mutex);
+
+	if (handled == 0)
+		pr_info("Spurious BIF slave interrupt detected.\n");
+
+	if (rc > 0)
+		rc = 0;
+
+	return rc;
+}
+EXPORT_SYMBOL(bif_ctrl_notify_slave_irq);
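+
+/*
+ * Illustrative BIF controller driver usage of bif_ctrl_notify_slave_irq(),
+ * not part of this driver.  The threaded IRQ handler and the "foo" chip
+ * structure holding the bdev pointer are assumptions made only for this
+ * sketch:
+ *
+ *	static irqreturn_t foo_ctrl_irq_thread(int irq, void *data)
+ *	{
+ *		struct foo_ctrl_chip *chip = data;
+ *		int rc;
+ *
+ *		rc = bif_ctrl_notify_slave_irq(chip->bdev);
+ *		if (rc)
+ *			pr_err("slave irq notification failed, rc=%d\n", rc);
+ *
+ *		return IRQ_HANDLED;
+ *	}
+ */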
+
+/**
+ * bif_ctrl_notify_battery_changed() - notify the BIF framework that a battery
+ *				pack has been inserted or removed
+ * @bdev:	BIF controller device pointer
+ *
+ * This function should only be called from a BIF controller driver.
+ *
+ * Returns 0 for success or errno if an error occurred.
+ */
+int bif_ctrl_notify_battery_changed(struct bif_ctrl_dev *bdev)
+{
+	int rc = 0;
+	int present;
+
+	if (IS_ERR_OR_NULL(bdev))
+		return -EINVAL;
+
+	if (bdev->desc->ops->get_battery_presence) {
+		present = bdev->desc->ops->get_battery_presence(bdev);
+		if (present < 0) {
+			pr_err("Could not determine battery presence, rc=%d\n",
+				rc);
+			return rc;
+		}
+
+		if (bdev->battery_present == !!present)
+			return 0;
+
+		bdev->battery_present = present;
+
+		rc = blocking_notifier_call_chain(&bdev->bus_change_notifier,
+			present ? BIF_BUS_EVENT_BATTERY_INSERTED
+				: BIF_BUS_EVENT_BATTERY_REMOVED, bdev);
+		if (rc)
+			pr_err("Call chain noification failed, rc=%d\n", rc);
+	}
+
+	return rc;
+}
+EXPORT_SYMBOL(bif_ctrl_notify_battery_changed);
+
+/**
+ * bif_ctrl_signal_battery_changed() - notify the BIF framework that a battery
+ *				pack has been inserted or removed
+ * @ctrl:	BIF controller consumer handle
+ *
+ * This function should only be called by a BIF consumer driver on systems where
+ * the BIF controller driver is unable to determine when a battery is inserted
+ * or removed.
+ *
+ * Returns 0 for success or errno if an error occurred.
+ */
+int bif_ctrl_signal_battery_changed(struct bif_ctrl *ctrl)
+{
+	if (IS_ERR_OR_NULL(ctrl))
+		return -EINVAL;
+
+	return bif_ctrl_notify_battery_changed(ctrl->bdev);
+}
+EXPORT_SYMBOL(bif_ctrl_signal_battery_changed);
+
+/**
+ * bif_ctrl_notifier_register() - register a notifier block to be called when
+ *				a battery pack is inserted or removed
+ * @ctrl:	BIF controller consumer handle
+ * @nb:		Notifier block to call when a battery pack is inserted or
+ *		removed
+ *
+ * The value passed into the notifier when it is called is one of
+ * enum bif_bus_event.
+ *
+ * Returns 0 for success or errno if an error occurred.
+ */
+int bif_ctrl_notifier_register(struct bif_ctrl *ctrl, struct notifier_block *nb)
+{
+	int rc;
+
+	if (IS_ERR_OR_NULL(ctrl))
+		return -EINVAL;
+
+	rc = blocking_notifier_chain_register(&ctrl->bdev->bus_change_notifier,
+					      nb);
+	if (rc)
+		pr_err("Notifier registration failed, rc=%d\n", rc);
+
+	return rc;
+}
+EXPORT_SYMBOL(bif_ctrl_notifier_register);
+
+/**
+ * bif_ctrl_notifier_unregister() - unregister a battery status change notifier
+ *				block that was previously registered
+ * @ctrl:	BIF controller consumer handle
+ * @nb:		Notifier block that was previously registered with
+ *		bif_ctrl_notifier_register()
+ *
+ * Returns 0 for success or errno if an error occurred.
+ */
+int bif_ctrl_notifier_unregister(struct bif_ctrl *ctrl,
+				 struct notifier_block *nb)
+{
+	int rc;
+
+	if (IS_ERR_OR_NULL(ctrl))
+		return -EINVAL;
+
+	rc = blocking_notifier_chain_unregister(
+			&ctrl->bdev->bus_change_notifier, nb);
+	if (rc)
+		pr_err("Notifier unregistration failed, rc=%d\n", rc);
+
+	return rc;
+}
+EXPORT_SYMBOL(bif_ctrl_notifier_unregister);
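+
+/*
+ * Illustrative consumer usage of bif_ctrl_notifier_register(), not part of
+ * this driver.  The "foo" names are assumptions made only for this sketch:
+ *
+ *	static int foo_batt_cb(struct notifier_block *nb, unsigned long event,
+ *				void *data)
+ *	{
+ *		if (event == BIF_BUS_EVENT_BATTERY_INSERTED)
+ *			pr_info("battery pack inserted\n");
+ *		else if (event == BIF_BUS_EVENT_BATTERY_REMOVED)
+ *			pr_info("battery pack removed\n");
+ *
+ *		return NOTIFY_OK;
+ *	}
+ *
+ *	static struct notifier_block foo_batt_nb = {
+ *		.notifier_call	= foo_batt_cb,
+ *	};
+ *
+ *	rc = bif_ctrl_notifier_register(ctrl, &foo_batt_nb);
+ */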
+
+/**
+ * bif_get_bus_handle() - returns the BIF controller consumer handle associated
+ *			with a BIF slave handle
+ * @slave:	BIF slave handle
+ *
+ * Note, bif_ctrl_put() should never be called for the pointer output by
+ * bif_get_bus_handle().
+ */
+struct bif_ctrl *bif_get_bus_handle(struct bif_slave *slave)
+{
+	if (IS_ERR_OR_NULL(slave))
+		return ERR_PTR(-EINVAL);
+
+	return &slave->ctrl;
+}
+EXPORT_SYMBOL(bif_get_bus_handle);
+
+/**
+ * bif_ctrl_count() - returns the number of registered BIF controllers
+ */
+int bif_ctrl_count(void)
+{
+	struct bif_ctrl_dev *bdev;
+	int count = 0;
+
+	mutex_lock(&bif_ctrl_list_mutex);
+
+	list_for_each_entry(bdev, &bif_ctrl_list, list) {
+		count++;
+	}
+	mutex_unlock(&bif_ctrl_list_mutex);
+
+	return count;
+}
+EXPORT_SYMBOL(bif_ctrl_count);
+
+/**
+ * bif_ctrl_get_by_id() - get a handle for the id'th BIF controller registered
+ *			in the system
+ * @id:	Arbitrary number associated with the BIF bus in the system
+ *
+ * id must be in the range [0, bif_ctrl_count() - 1].  This function should only
+ * need to be called by a BIF consumer that is unable to link to a given BIF
+ * controller via a device tree binding.
+ *
+ * Returns a BIF controller consumer handle if successful or an ERR_PTR if not.
+ */
+struct bif_ctrl *bif_ctrl_get_by_id(unsigned int id)
+{
+	struct bif_ctrl_dev *bdev;
+	struct bif_ctrl_dev *bdev_found = NULL;
+	struct bif_ctrl *ctrl = ERR_PTR(-ENODEV);
+
+	mutex_lock(&bif_ctrl_list_mutex);
+
+	list_for_each_entry(bdev, &bif_ctrl_list, list) {
+		if (id == 0) {
+			bdev_found = bdev;
+			break;
+		}
+		id--;
+	}
+	mutex_unlock(&bif_ctrl_list_mutex);
+
+	if (bdev_found) {
+		ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL);
+		if (!ctrl) {
+			pr_err("Bus handle allocation failed\n");
+			ctrl = ERR_PTR(-ENOMEM);
+		} else {
+			ctrl->bdev = bdev_found;
+		}
+	}
+
+	return ctrl;
+}
+EXPORT_SYMBOL(bif_ctrl_get_by_id);
+
+/**
+ * bif_ctrl_get() - get a handle for the BIF controller that is linked to the
+ *			consumer device in the device tree
+ * @consumer_dev:	Pointer to the consumer's device
+ *
+ * In order to use this function, the BIF consumer's device must specify the
+ * "qcom,bif-ctrl" property in its device tree node which points to a BIF
+ * controller device node.
+ *
+ * Returns a BIF controller consumer handle if successful or an ERR_PTR if not.
+ * If the BIF controller linked to the consumer device has not yet probed, then
+ * ERR_PTR(-EPROBE_DEFER) is returned.
+ */
+struct bif_ctrl *bif_ctrl_get(struct device *consumer_dev)
+{
+	struct device_node *ctrl_node = NULL;
+	struct bif_ctrl_dev *bdev_found = NULL;
+	struct bif_ctrl *ctrl = ERR_PTR(-EPROBE_DEFER);
+	struct bif_ctrl_dev *bdev = NULL;
+
+	if (!consumer_dev || !consumer_dev->of_node) {
+		pr_err("Invalid device node\n");
+		return ERR_PTR(-EINVAL);
+	}
+
+	ctrl_node = of_parse_phandle(consumer_dev->of_node, "qcom,bif-ctrl", 0);
+	if (!ctrl_node) {
+		pr_err("Could not find qcom,bif-ctrl property in %s\n",
+			consumer_dev->of_node->full_name);
+		return ERR_PTR(-ENXIO);
+	}
+
+	mutex_lock(&bif_ctrl_list_mutex);
+	list_for_each_entry(bdev, &bif_ctrl_list, list) {
+		if (bdev->ctrl_dev && bdev->ctrl_dev->of_node == ctrl_node) {
+			bdev_found = bdev;
+			break;
+		}
+	}
+	mutex_unlock(&bif_ctrl_list_mutex);
+
+	if (bdev_found) {
+		ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL);
+		if (!ctrl) {
+			pr_err("Bus handle allocation failed\n");
+			ctrl = ERR_PTR(-ENOMEM);
+		} else {
+			ctrl->bdev = bdev_found;
+		}
+	}
+
+	return ctrl;
+}
+EXPORT_SYMBOL(bif_ctrl_get);
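+
+/*
+ * Illustrative consumer probe usage of bif_ctrl_get(), not part of this
+ * driver.  It assumes that the consumer is a platform driver and that its
+ * device tree node contains a "qcom,bif-ctrl" property:
+ *
+ *	static int foo_consumer_probe(struct platform_device *pdev)
+ *	{
+ *		struct bif_ctrl *ctrl;
+ *
+ *		ctrl = bif_ctrl_get(&pdev->dev);
+ *		if (IS_ERR(ctrl))
+ *			return PTR_ERR(ctrl);
+ *
+ *		... remaining probe work using ctrl ...
+ *
+ *		return 0;
+ *	}
+ *
+ * The handle should be released with bif_ctrl_put() when it is no longer
+ * needed.
+ */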
+
+/**
+ * bif_ctrl_put() - frees a BIF controller handle
+ * @ctrl:	BIF controller consumer handle
+ */
+void bif_ctrl_put(struct bif_ctrl *ctrl)
+{
+	if (!IS_ERR_OR_NULL(ctrl) && ctrl->exclusive_lock)
+		mutex_unlock(&ctrl->bdev->mutex);
+	kfree(ctrl);
+}
+EXPORT_SYMBOL(bif_ctrl_put);
+
+/*
+ * Returns true if all parameters are matched, otherwise false.
+ * function_type and function_version mean that there exists some function in
+ * the slave which has the specified type and version.  ctrl == NULL is treated
+ * as a wildcard.
+ */
+static bool bif_slave_match(const struct bif_ctrl *ctrl,
+	struct bif_slave_dev *sdev, const struct bif_match_criteria *criteria)
+{
+	int i, type, version;
+
+	if (ctrl && (ctrl->bdev != sdev->bdev))
+		return false;
+
+	if (!sdev->present
+	    && (!(criteria->match_mask & BIF_MATCH_IGNORE_PRESENCE)
+		|| ((criteria->match_mask & BIF_MATCH_IGNORE_PRESENCE)
+		    && !criteria->ignore_presence)))
+		return false;
+
+	if ((criteria->match_mask & BIF_MATCH_MANUFACTURER_ID)
+	    && sdev->l1_data.manufacturer_id != criteria->manufacturer_id)
+		return false;
+
+	if ((criteria->match_mask & BIF_MATCH_PRODUCT_ID)
+	    && sdev->l1_data.product_id != criteria->product_id)
+		return false;
+
+	if (criteria->match_mask & BIF_MATCH_FUNCTION_TYPE) {
+		if (!sdev->function_directory)
+			return false;
+		for (i = 0; i < sdev->l1_data.length / 4; i++) {
+			type = sdev->function_directory[i].function_type;
+			version = sdev->function_directory[i].function_version;
+			if (type == criteria->function_type &&
+				(version == criteria->function_version
+					|| !(criteria->match_mask
+						& BIF_MATCH_FUNCTION_VERSION)))
+				return true;
+		}
+		return false;
+	}
+
+	return true;
+}
+
+/**
+ * bif_slave_match_count() - returns the number of slaves associated with the
+ *			specified BIF controller which fit the matching
+ *			criteria
+ * @ctrl:		BIF controller consumer handle
+ * @match_criteria:	Matching criteria used to filter slaves
+ */
+int bif_slave_match_count(const struct bif_ctrl *ctrl,
+			const struct bif_match_criteria *match_criteria)
+{
+	struct bif_slave_dev *sdev;
+	int count = 0;
+
+	mutex_lock(&bif_sdev_list_mutex);
+
+	list_for_each_entry(sdev, &bif_sdev_list, list) {
+		if (bif_slave_match(ctrl, sdev, match_criteria))
+			count++;
+	}
+
+	mutex_unlock(&bif_sdev_list_mutex);
+
+	return count;
+}
+EXPORT_SYMBOL(bif_slave_match_count);
+
+/**
+ * bif_slave_match_get() - get a slave handle for the id'th slave associated
+ *			with the specified BIF controller which fits the
+ *			matching criteria
+ * @ctrl:		BIF controller consumer handle
+ * @id:			Index into the set of matching slaves
+ * @match_criteria:	Matching criteria used to filter slaves
+ *
+ * id must be in the range [0, bif_slave_match_count(ctrl, match_criteria) - 1].
+ *
+ * Returns a BIF slave handle if successful or an ERR_PTR if not.
+ */
+struct bif_slave *bif_slave_match_get(const struct bif_ctrl *ctrl,
+	unsigned int id, const struct bif_match_criteria *match_criteria)
+{
+	struct bif_slave_dev *sdev;
+	struct bif_slave *slave = ERR_PTR(-ENODEV);
+	struct bif_slave_dev *sdev_found = NULL;
+	int count = 0;
+
+	mutex_lock(&bif_sdev_list_mutex);
+
+	list_for_each_entry(sdev, &bif_sdev_list, list) {
+		if (bif_slave_match(ctrl, sdev, match_criteria))
+			count++;
+		if (count == id + 1) {
+			sdev_found = sdev;
+			break;
+		}
+	}
+
+	mutex_unlock(&bif_sdev_list_mutex);
+
+	if (sdev_found) {
+		slave = kzalloc(sizeof(*slave), GFP_KERNEL);
+		if (!slave) {
+			pr_err("Slave allocation failed\n");
+			slave = ERR_PTR(-ENOMEM);
+		} else {
+			slave->sdev = sdev_found;
+			slave->ctrl.bdev = sdev_found->bdev;
+		}
+	}
+
+	return slave;
+}
+EXPORT_SYMBOL(bif_slave_match_get);
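+
+/*
+ * Illustrative consumer usage of the slave matching API, not part of this
+ * driver.  The manufacturer ID value is an assumption made only for this
+ * sketch:
+ *
+ *	struct bif_match_criteria criteria = {
+ *		.match_mask		= BIF_MATCH_MANUFACTURER_ID
+ *					  | BIF_MATCH_FUNCTION_TYPE,
+ *		.manufacturer_id	= 0x0100,
+ *		.function_type		= BIF_FUNC_NVM,
+ *	};
+ *	struct bif_slave *slave;
+ *
+ *	if (bif_slave_match_count(ctrl, &criteria) == 0)
+ *		return -ENODEV;
+ *
+ *	slave = bif_slave_match_get(ctrl, 0, &criteria);
+ *	if (IS_ERR(slave))
+ *		return PTR_ERR(slave);
+ */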
+
+/**
+ * bif_slave_put() - frees a BIF slave handle
+ * @slave:	BIF slave handle
+ */
+void bif_slave_put(struct bif_slave *slave)
+{
+	if (!IS_ERR_OR_NULL(slave) && slave->ctrl.exclusive_lock)
+		mutex_unlock(&slave->sdev->bdev->mutex);
+	kfree(slave);
+}
+EXPORT_SYMBOL(bif_slave_put);
+
+/**
+ * bif_slave_find_function() - get the function pointer and version of a
+ *			BIF function if it is present on the specified slave
+ * @slave:		BIF slave handle
+ * @function:		BIF function to search for inside of the slave
+ * @version:		If the function is found, then 'version' is set to the
+ *			version value of the function
+ * @function_pointer:	If the function is found, then 'function_pointer' is set
+ *			to the BIF slave address of the function
+ *
+ * Returns 0 for success or errno if an error occurred.  If the function is not
+ * found in the slave, then -ENODEV is returned.
+ */
+int bif_slave_find_function(struct bif_slave *slave, u8 function, u8 *version,
+				u16 *function_pointer)
+{
+	int rc = -ENODEV;
+	struct bif_ddb_l2_data *func;
+	int i;
+
+	if (IS_ERR_OR_NULL(slave) || IS_ERR_OR_NULL(version)
+	    || IS_ERR_OR_NULL(function_pointer)) {
+		pr_err("Invalid pointer input.\n");
+		return -EINVAL;
+	}
+
+	func = slave->sdev->function_directory;
+
+	for (i = 0; i < slave->sdev->l1_data.length / 4; i++) {
+		if (function == func[i].function_type) {
+			*version = func[i].function_version;
+			*function_pointer = func[i].function_pointer;
+			rc = 0;
+			break;
+		}
+	}
+
+	return rc;
+}
+EXPORT_SYMBOL(bif_slave_find_function);
+
+/**
+ * bif_slave_read() - read contiguous memory values from a BIF slave
+ * @slave:	BIF slave handle
+ * @addr:	BIF slave address to begin reading at
+ * @buf:	Buffer to fill with memory values
+ * @len:	Number of bytes to read
+ *
+ * Returns 0 for success or errno if an error occurred.
+ */
+int bif_slave_read(struct bif_slave *slave, u16 addr, u8 *buf, int len)
+{
+	int rc;
+
+	if (IS_ERR_OR_NULL(slave) || IS_ERR_OR_NULL(buf)) {
+		pr_err("Invalid pointer input.\n");
+		return -EINVAL;
+	}
+
+	bif_slave_ctrl_lock(slave);
+
+	rc = _bif_slave_read(slave->sdev, addr, buf, len);
+	if (rc)
+		pr_err("BIF slave read failed, rc=%d\n", rc);
+
+	bif_slave_ctrl_unlock(slave);
+
+	return rc;
+}
+EXPORT_SYMBOL(bif_slave_read);
+
+/**
+ * bif_slave_write() - write contiguous memory values to a BIF slave
+ * @slave:	BIF slave handle
+ * @addr:	BIF slave address to begin writing at
+ * @buf:	Buffer containing values to write
+ * @len:	Number of bytes to write
+ *
+ * Returns 0 for success or errno if an error occurred.
+ */
+int bif_slave_write(struct bif_slave *slave, u16 addr, u8 *buf, int len)
+{
+	int rc;
+
+	if (IS_ERR_OR_NULL(slave) || IS_ERR_OR_NULL(buf)) {
+		pr_err("Invalid pointer input.\n");
+		return -EINVAL;
+	}
+
+	bif_slave_ctrl_lock(slave);
+
+	rc = _bif_slave_write(slave->sdev, addr, buf, len);
+	if (rc)
+		pr_err("BIF slave write failed, rc=%d\n", rc);
+
+	bif_slave_ctrl_unlock(slave);
+
+	return rc;
+}
+EXPORT_SYMBOL(bif_slave_write);
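+
+/*
+ * Illustrative consumer usage of bif_slave_find_function() together with
+ * bif_slave_read(), not part of this driver.  The choice of function and the
+ * 8 byte read length are assumptions made only for this sketch:
+ *
+ *	u8 version, buf[8];
+ *	u16 function_pointer;
+ *	int rc;
+ *
+ *	rc = bif_slave_find_function(slave, BIF_FUNC_NVM, &version,
+ *					&function_pointer);
+ *	if (rc)
+ *		return rc;
+ *
+ *	rc = bif_slave_read(slave, function_pointer, buf, 8);
+ *	if (rc)
+ *		pr_err("bif_slave_read failed, rc=%d\n", rc);
+ */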
+
+/**
+ * bif_slave_is_present() - check if a slave is currently physically present
+ *		in the system
+ * @slave:	BIF slave handle
+ *
+ * Returns 1 if the slave is present, 0 if the slave is not present, or errno
+ * if an error occurred.
+ *
+ * This function can be used by BIF consumer drivers to check if their slave
+ * handles are still meaningful after battery reinsertion.
+ */
+int bif_slave_is_present(struct bif_slave *slave)
+{
+	if (IS_ERR_OR_NULL(slave)) {
+		pr_err("Invalid pointer input.\n");
+		return -EINVAL;
+	}
+
+	return slave->sdev->present;
+}
+EXPORT_SYMBOL(bif_slave_is_present);
+
+/**
+ * bif_slave_is_selected() - check if a slave is currently selected on the BIF
+ *		bus
+ * @slave:	BIF slave handle
+ *
+ * Returns 1 if the slave is selected, 0 if the slave is not selected, or errno
+ * if an error occurred.
+ *
+ * This function should not be required under normal circumstances since the
+ * bif-core framework ensures that slaves are always selected when needed.
+ * It would be most useful when used as a helper in conjunction with
+ * bif_ctrl_bus_lock() and the raw transaction functions.
+ */
+int bif_slave_is_selected(struct bif_slave *slave)
+{
+	int rc;
+
+	if (IS_ERR_OR_NULL(slave)) {
+		pr_err("Invalid pointer input.\n");
+		return -EINVAL;
+	}
+
+	if (slave->sdev->bdev->selected_sdev != slave->sdev)
+		return false;
+
+	bif_slave_ctrl_lock(slave);
+	rc = bif_is_slave_selected(slave->sdev->bdev);
+	bif_slave_ctrl_unlock(slave);
+
+	return rc;
+}
+EXPORT_SYMBOL(bif_slave_is_selected);
+
+/**
+ * bif_slave_select() - select a slave on the BIF bus
+ * @slave:	BIF slave handle
+ *
+ * Returns 0 on success or errno if an error occurred.
+ *
+ * This function should not be required under normal circumstances since the
+ * bif-core framework ensures that slaves are always selected when needed.
+ * It would be most useful when used as a helper in conjunction with
+ * bif_ctrl_bus_lock() and the raw transaction functions.
+ */
+int bif_slave_select(struct bif_slave *slave)
+{
+	int rc;
+
+	if (IS_ERR_OR_NULL(slave)) {
+		pr_err("Invalid pointer input.\n");
+		return -EINVAL;
+	}
+
+	bif_slave_ctrl_lock(slave);
+	slave->sdev->bdev->selected_sdev = NULL;
+	rc = bif_select_slave(slave->sdev);
+	bif_slave_ctrl_unlock(slave);
+
+	return rc;
+}
+EXPORT_SYMBOL(bif_slave_select);
+
+/**
+ * bif_ctrl_raw_transaction() - perform a raw BIF transaction on the bus which
+ *			expects no slave response
+ * @ctrl:		BIF controller consumer handle
+ * @transaction:	BIF transaction to carry out.  This should be one of the
+ *			values in enum bif_transaction.
+ * @data:		8-bit data to use in the transaction.  The meaning of
+ *			this data depends upon the transaction that is to be
+ *			performed.
+ *
+ * When performing a bus command (BC) transaction, values in enum
+ * bif_bus_command may be used for the data parameter.  Additional manufacturer
+ * specific values may also be used in a BC transaction.
+ *
+ * Returns 0 on success or errno if an error occurred.
+ *
+ * This function should only need to be used when BIF transactions are required
+ * that are not handled by the bif-core directly.
+ */
+int bif_ctrl_raw_transaction(struct bif_ctrl *ctrl, int transaction, u8 data)
+{
+	int rc;
+
+	if (IS_ERR_OR_NULL(ctrl)) {
+		pr_err("Invalid pointer input.\n");
+		return -EINVAL;
+	}
+
+	bif_ctrl_lock(ctrl);
+
+	rc = ctrl->bdev->desc->ops->bus_transaction(ctrl->bdev, transaction,
+							data);
+	if (rc)
+		pr_err("BIF bus transaction failed, rc=%d\n", rc);
+
+	bif_ctrl_unlock(ctrl);
+
+	return rc;
+}
+EXPORT_SYMBOL(bif_ctrl_raw_transaction);
+
+/**
+ * bif_ctrl_raw_transaction_read() - perform a raw BIF transaction on the bus
+ *			which expects an RD or TACK slave response word
+ * @ctrl:		BIF controller consumer handle
+ * @transaction:	BIF transaction to carry out.  This should be one of the
+ *			values in enum bif_transaction.
+ * @data:		8-bit data to use in the transaction.  The meaning of
+ *			this data depends upon the transaction that is to be
+ *			performed.
+ * @response:		Pointer to an integer which is filled with the 11-bit
+ *			slave response word upon success.  The 11-bit format is
+ *			(MSB to LSB) BCF, ACK, EOT, D7-D0.
+ *
+ * When performing a bus command (BC) transaction, values in enum
+ * bif_bus_command may be used for the data parameter.  Additional manufacturer
+ * specific values may also be used in a BC transaction.
+ *
+ * Returns 0 on success or errno if an error occurred.
+ *
+ * This function should only need to be used when BIF transactions are required
+ * that are not handled by the bif-core directly.
+ */
+int bif_ctrl_raw_transaction_read(struct bif_ctrl *ctrl, int transaction,
+					u8 data, int *response)
+{
+	int rc;
+
+	if (IS_ERR_OR_NULL(ctrl) || IS_ERR_OR_NULL(response)) {
+		pr_err("Invalid pointer input.\n");
+		return -EINVAL;
+	}
+
+	bif_ctrl_lock(ctrl);
+
+	rc = ctrl->bdev->desc->ops->bus_transaction_read(ctrl->bdev,
+					transaction, data, response);
+	if (rc)
+		pr_err("BIF bus transaction failed, rc=%d\n", rc);
+
+	bif_ctrl_unlock(ctrl);
+
+	return rc;
+}
+EXPORT_SYMBOL(bif_ctrl_raw_transaction_read);
+
+/**
+ * bif_ctrl_raw_transaction_query() - perform a raw BIF transaction on the bus
+ *			which expects a BQ slave response
+ * @ctrl:		BIF controller consumer handle
+ * @transaction:	BIF transaction to carry out.  This should be one of the
+ *			values in enum bif_transaction.
+ * @data:		8-bit data to use in the transaction.  The meaning of
+ *			this data depends upon the transaction that is to be
+ *			performed.
+ * @query_response:	Pointer to boolean which is set to true if a BQ pulse
+ *			is received, or false if no BQ pulse is received before
+ *			timing out.
+ *
+ * When performing a bus command (BC) transaction, values in enum
+ * bif_bus_command may be used for the data parameter.  Additional manufacturer
+ * specific values may also be used in a BC transaction.
+ *
+ * Returns 0 on success or errno if an error occurred.
+ *
+ * This function should only need to be used when BIF transactions are required
+ * that are not handled by the bif-core directly.
+ */
+int bif_ctrl_raw_transaction_query(struct bif_ctrl *ctrl, int transaction,
+		u8 data, bool *query_response)
+{
+	int rc;
+
+	if (IS_ERR_OR_NULL(ctrl) || IS_ERR_OR_NULL(query_response)) {
+		pr_err("Invalid pointer input.\n");
+		return -EINVAL;
+	}
+
+	bif_ctrl_lock(ctrl);
+
+	rc = ctrl->bdev->desc->ops->bus_transaction_query(ctrl->bdev,
+					transaction, data, query_response);
+	if (rc)
+		pr_err("BIF bus transaction failed, rc=%d\n", rc);
+
+	bif_ctrl_unlock(ctrl);
+
+	return rc;
+}
+EXPORT_SYMBOL(bif_ctrl_raw_transaction_query);
+
+/**
+ * bif_ctrl_bus_lock() - lock the BIF bus of a controller for exclusive access
+ * @ctrl:	BIF controller consumer handle
+ *
+ * This function should only need to be called in circumstances where a BIF
+ * consumer is issuing special BIF bus commands that have strict ordering
+ * requirements.
+ */
+void bif_ctrl_bus_lock(struct bif_ctrl *ctrl)
+{
+	if (IS_ERR_OR_NULL(ctrl)) {
+		pr_err("Invalid controller handle.\n");
+		return;
+	}
+
+	if (ctrl->exclusive_lock) {
+		pr_err("BIF bus exclusive lock already held\n");
+		return;
+	}
+
+	mutex_lock(&ctrl->bdev->mutex);
+	ctrl->exclusive_lock = true;
+	bif_cancel_irq_mode_work(ctrl->bdev);
+}
+EXPORT_SYMBOL(bif_ctrl_bus_lock);
+
+/**
+ * bif_ctrl_bus_unlock() - unlock the BIF bus of a controller that was
+ *		locked for exclusive access
+ * @ctrl:	BIF controller consumer handle
+ *
+ * This function must only be called after first calling bif_ctrl_bus_lock().
+ */
+void bif_ctrl_bus_unlock(struct bif_ctrl *ctrl)
+{
+	if (IS_ERR_OR_NULL(ctrl)) {
+		pr_err("Invalid controller handle.\n");
+		return;
+	}
+
+	if (!ctrl->exclusive_lock) {
+		pr_err("BIF bus exclusive lock not already held\n");
+		return;
+	}
+
+	ctrl->exclusive_lock = false;
+	bif_schedule_irq_mode_work(ctrl->bdev);
+	mutex_unlock(&ctrl->bdev->mutex);
+}
+EXPORT_SYMBOL(bif_ctrl_bus_unlock);
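+
+/*
+ * Illustrative usage of the exclusive bus lock together with the raw
+ * transaction API, not part of this driver.  The 0x10 bus command value is a
+ * placeholder for a manufacturer specific command, and the controller handle
+ * is taken from the slave handle so that the slave helpers share the same
+ * exclusive lock:
+ *
+ *	struct bif_ctrl *ctrl = bif_get_bus_handle(slave);
+ *
+ *	bif_ctrl_bus_lock(ctrl);
+ *	rc = bif_slave_select(slave);
+ *	if (!rc)
+ *		rc = bif_ctrl_raw_transaction(ctrl, BIF_TRANS_BC, 0x10);
+ *	bif_ctrl_bus_unlock(ctrl);
+ */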
+
+/**
+ * bif_ctrl_measure_rid() - measure the battery pack Rid pull-down resistance
+ *		in ohms
+ * @ctrl:	BIF controller consumer handle
+ *
+ * Returns the resistance of the Rid resistor in ohms if successful or errno
+ * if an error occurred.
+ */
+int bif_ctrl_measure_rid(struct bif_ctrl *ctrl)
+{
+	int rc;
+
+	if (IS_ERR_OR_NULL(ctrl)) {
+		pr_err("Invalid controller handle.\n");
+		return -ENODEV;
+	}
+
+	if (!ctrl->bdev->desc->ops->get_battery_rid) {
+		pr_err("Cannot measure Rid.\n");
+		return -ENXIO;
+	}
+
+	bif_ctrl_lock(ctrl);
+
+	rc = ctrl->bdev->desc->ops->get_battery_rid(ctrl->bdev);
+	if (rc < 0)
+		pr_err("Error during Rid measurement, rc=%d\n", rc);
+
+	bif_ctrl_unlock(ctrl);
+
+	return rc;
+}
+EXPORT_SYMBOL(bif_ctrl_measure_rid);
+
+/**
+ * bif_ctrl_get_bus_period() - get the BIF bus period (tau_bif) in nanoseconds
+ * @ctrl:	BIF controller consumer handle
+ *
+ * Returns the currently configured bus period in nanoseconds if successful or
+ * errno if an error occurred.
+ */
+int bif_ctrl_get_bus_period(struct bif_ctrl *ctrl)
+{
+	int rc;
+
+	if (IS_ERR_OR_NULL(ctrl)) {
+		pr_err("Invalid controller handle.\n");
+		return -ENODEV;
+	}
+
+	if (!ctrl->bdev->desc->ops->get_bus_period) {
+		pr_err("Cannot get the BIF bus period.\n");
+		return -ENXIO;
+	}
+
+	rc = ctrl->bdev->desc->ops->get_bus_period(ctrl->bdev);
+	if (rc < 0)
+		pr_err("Error during bus period retrieval, rc=%d\n", rc);
+
+	return rc;
+}
+EXPORT_SYMBOL(bif_ctrl_get_bus_period);
+
+/**
+ * bif_ctrl_set_bus_period() - set the BIF bus period (tau_bif) in nanoseconds
+ * @ctrl:	BIF controller consumer handle
+ * @period_ns:	BIF bus period in nanoseconds to use
+ *
+ * If the exact period is not supported by the BIF controller hardware, then the
+ * next larger supported period will be used.
+ *
+ * Returns 0 on success or errno if an error occurred.
+ */
+int bif_ctrl_set_bus_period(struct bif_ctrl *ctrl, int period_ns)
+{
+	int rc;
+
+	if (IS_ERR_OR_NULL(ctrl)) {
+		pr_err("Invalid controller handle.\n");
+		return -ENODEV;
+	}
+
+	if (!ctrl->bdev->desc->ops->set_bus_period) {
+		pr_err("Cannot set the BIF bus period.\n");
+		return -ENXIO;
+	}
+
+	bif_ctrl_lock(ctrl);
+	rc = ctrl->bdev->desc->ops->set_bus_period(ctrl->bdev, period_ns);
+	if (rc)
+		pr_err("Error during bus period configuration, rc=%d\n", rc);
+	bif_ctrl_unlock(ctrl);
+
+	return rc;
+}
+EXPORT_SYMBOL(bif_ctrl_set_bus_period);
+
+/**
+ * bif_ctrl_get_bus_state() - get the current state of the BIF bus
+ * @ctrl:	BIF controller consumer handle
+ *
+ * Returns a bus state from enum bif_bus_state if successful or errno if an
+ * error occurred.
+ */
+int bif_ctrl_get_bus_state(struct bif_ctrl *ctrl)
+{
+	int rc;
+
+	if (IS_ERR_OR_NULL(ctrl)) {
+		pr_err("Invalid controller handle.\n");
+		return -ENODEV;
+	}
+
+	rc = ctrl->bdev->desc->ops->get_bus_state(ctrl->bdev);
+	if (rc < 0)
+		pr_err("Error during bus state retrieval, rc=%d\n", rc);
+
+	return rc;
+}
+EXPORT_SYMBOL(bif_ctrl_get_bus_state);
+
+/**
+ * bif_ctrl_set_bus_state() - set the state of the BIF bus
+ * @ctrl:	BIF controller consumer handle
+ * @state:	State for the BIF bus to enter
+ *
+ * Returns 0 on success or errno if an error occurred.
+ */
+int bif_ctrl_set_bus_state(struct bif_ctrl *ctrl, enum bif_bus_state state)
+{
+	int rc;
+
+	if (IS_ERR_OR_NULL(ctrl)) {
+		pr_err("Invalid controller handle.\n");
+		return -ENODEV;
+	}
+
+	bif_ctrl_lock(ctrl);
+
+	rc = ctrl->bdev->desc->ops->set_bus_state(ctrl->bdev, state);
+	if (rc < 0)
+		pr_err("Error during bus state configuration, rc=%d\n", rc);
+
+	/*
+	 * Uncache the selected slave if the new bus state results in the slave
+	 * becoming unselected.
+	 */
+	if (state == BIF_BUS_STATE_MASTER_DISABLED
+	    || state == BIF_BUS_STATE_POWER_DOWN
+	    || state == BIF_BUS_STATE_STANDBY)
+		ctrl->bdev->selected_sdev = NULL;
+
+	bif_ctrl_unlock(ctrl);
+
+	return rc;
+}
+EXPORT_SYMBOL(bif_ctrl_set_bus_state);
+
+/*
+ * Check if the specified function is a protocol function and if it is, then
+ * instantiate protocol function data for the slave.
+ */
+static int bif_initialize_protocol_function(struct bif_slave_dev *sdev,
+		struct bif_ddb_l2_data *func)
+{
+	int rc = 0;
+	u8 buf[4];
+
+	/* Ensure that this is a protocol function. */
+	if (func->function_type != BIF_FUNC_PROTOCOL)
+		return 0;
+
+	if (sdev->protocol_function) {
+		pr_err("Duplicate protocol function found for BIF slave; DEV_ADR=0x%02X\n",
+			sdev->slave_addr);
+		return -EPERM;
+	}
+
+	sdev->protocol_function = kzalloc(sizeof(struct bif_protocol_function),
+						GFP_KERNEL);
+	if (!sdev->protocol_function) {
+		pr_err("out of memory\n");
+		return -ENOMEM;
+	}
+
+	rc = _bif_slave_read(sdev, func->function_pointer, buf, 4);
+	if (rc) {
+		pr_err("Protocol function data read failed, rc=%d\n", rc);
+		return rc;
+	}
+
+	sdev->protocol_function->protocol_pointer  = buf[0] << 8 | buf[1];
+	sdev->protocol_function->device_id_pointer = buf[2] << 8 | buf[3];
+	sdev->protocol_function->l2_entry = func;
+
+	rc = _bif_slave_read(sdev, sdev->protocol_function->device_id_pointer,
+		sdev->protocol_function->device_id, BIF_DEVICE_ID_BYTE_LENGTH);
+	if (rc) {
+		pr_err("Device ID read failed, rc=%d\n", rc);
+		return rc;
+	}
+
+	/* Check if this slave does not have a UID value stored. */
+	if (sdev->unique_id_bits_known == 0) {
+		sdev->unique_id_bits_known = BIF_UNIQUE_ID_BIT_LENGTH;
+		/* Fill in UID using manufacturer ID and device ID. */
+		sdev->unique_id[0] = sdev->l1_data.manufacturer_id >> 8;
+		sdev->unique_id[1] = sdev->l1_data.manufacturer_id;
+		memcpy(&sdev->unique_id[2],
+			sdev->protocol_function->device_id,
+			BIF_DEVICE_ID_BYTE_LENGTH);
+	}
+
+	return rc;
+}
+
+/*
+ * Check if the specified function is a slave control function and if it is,
+ * then instantiate slave control function data for the slave.
+ */
+static int bif_initialize_slave_control_function(struct bif_slave_dev *sdev,
+		struct bif_ddb_l2_data *func)
+{
+	int rc = 0;
+	int i;
+	u8 buf[3];
+
+	/* Ensure that this is a slave control function. */
+	if (func->function_type != BIF_FUNC_SLAVE_CONTROL)
+		return 0;
+
+	if (sdev->slave_ctrl_function) {
+		pr_err("Duplicate slave control function found for BIF slave; DEV_ADR=0x%02X\n",
+			sdev->slave_addr);
+		return -EPERM;
+	}
+
+	sdev->slave_ctrl_function
+		= kzalloc(sizeof(*sdev->slave_ctrl_function), GFP_KERNEL);
+	if (!sdev->slave_ctrl_function) {
+		pr_err("out of memory\n");
+		return -ENOMEM;
+	}
+
+	rc = _bif_slave_read(sdev, func->function_pointer, buf, 3);
+	if (rc) {
+		pr_err("Slave control function data read failed, rc=%d\n", rc);
+		return rc;
+	}
+
+	sdev->slave_ctrl_function->slave_ctrl_pointer = buf[0] << 8 | buf[1];
+	sdev->slave_ctrl_function->task_count
+		= buf[2] * SLAVE_CTRL_TASKS_PER_SET;
+	sdev->slave_ctrl_function->l2_entry = func;
+
+	if (sdev->slave_ctrl_function->task_count > 0) {
+		sdev->slave_ctrl_function->irq_notifier_list =
+			kzalloc(sizeof(struct blocking_notifier_head)
+			    * sdev->slave_ctrl_function->task_count,
+			    GFP_KERNEL);
+		if (!sdev->slave_ctrl_function->irq_notifier_list) {
+			pr_err("out of memory\n");
+			kfree(sdev->slave_ctrl_function);
+			return -ENOMEM;
+		}
+
+		for (i = 0; i < sdev->slave_ctrl_function->task_count; i++) {
+			BLOCKING_INIT_NOTIFIER_HEAD(
+			    &sdev->slave_ctrl_function->irq_notifier_list[i]);
+		}
+	}
+
+	return rc;
+}
+
+/**
+ * bif_crc_ccitt() - calculate the CRC-CCITT CRC value of the data specified
+ * @buffer:	Data to calculate the CRC of
+ * @len:	Length of the data buffer in bytes
+ *
+ * MIPI-BIF specifies the usage of CRC-CCITT for BIF data objects.  This
+ * function performs the CRC calculation while taking into account the bit
+ * ordering used by BIF.
+ */
+u16 bif_crc_ccitt(const u8 *buffer, unsigned int len)
+{
+	u16 crc = 0xFFFF;
+
+	while (len--) {
+		crc = crc_ccitt_byte(crc, bitrev8(*buffer));
+		buffer++;
+	}
+	return bitrev16(crc);
+}
+EXPORT_SYMBOL(bif_crc_ccitt);
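+
+/*
+ * Illustrative usage of bif_crc_ccitt(), not part of this driver: verify the
+ * CRC of a BIF data object whose raw image (6 byte header plus data, excluding
+ * the trailing 2 CRC bytes) is held in "buf" of length "len", with the stored
+ * CRC in "stored_crc"; all three names are assumptions made only for this
+ * sketch:
+ *
+ *	u16 crc = bif_crc_ccitt(buf, len);
+ *
+ *	if (crc != stored_crc)
+ *		pr_err("CRC mismatch: calc=0x%04X, expected=0x%04X\n",
+ *			crc, stored_crc);
+ */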
+
+static u16 bif_object_crc_ccitt(const struct bif_object *object)
+{
+	u16 crc = 0xFFFF;
+	int i;
+
+	crc = crc_ccitt_byte(crc, bitrev8(object->type));
+	crc = crc_ccitt_byte(crc, bitrev8(object->version));
+	crc = crc_ccitt_byte(crc, bitrev8(object->manufacturer_id >> 8));
+	crc = crc_ccitt_byte(crc, bitrev8(object->manufacturer_id));
+	crc = crc_ccitt_byte(crc, bitrev8(object->length >> 8));
+	crc = crc_ccitt_byte(crc, bitrev8(object->length));
+
+	for (i = 0; i < object->length - 8; i++)
+		crc = crc_ccitt_byte(crc, bitrev8(object->data[i]));
+
+	return bitrev16(crc);
+}
+
+/*
+ * Check if the specified function is an NVM function and if it is, then
+ * instantiate NVM function data for the slave and read all objects.
+ */
+static int bif_initialize_nvm_function(struct bif_slave_dev *sdev,
+		struct bif_ddb_l2_data *func)
+{
+	int rc = 0;
+	int data_len;
+	u8 buf[8], object_type;
+	struct bif_object *object;
+	struct bif_object *temp;
+	u16 addr;
+	u16 crc;
+
+	/* Ensure that this is an NVM function. */
+	if (func->function_type != BIF_FUNC_NVM)
+		return 0;
+
+	if (sdev->nvm_function) {
+		pr_err("Duplicate NVM function found for BIF slave; DEV_ADR=0x%02X\n",
+			sdev->slave_addr);
+		return -EPERM;
+	}
+
+	sdev->nvm_function
+		= kzalloc(sizeof(*sdev->nvm_function), GFP_KERNEL);
+	if (!sdev->nvm_function) {
+		pr_err("out of memory\n");
+		return -ENOMEM;
+	}
+
+	rc = _bif_slave_read(sdev, func->function_pointer, buf, 8);
+	if (rc) {
+		pr_err("NVM function data read failed, rc=%d\n", rc);
+		return rc;
+	}
+
+	sdev->nvm_function->nvm_pointer		= buf[0] << 8 | buf[1];
+	sdev->nvm_function->slave_control_channel	= buf[2];
+	sdev->nvm_function->write_buffer_size		= buf[3];
+	sdev->nvm_function->nvm_base_address		= buf[4] << 8 | buf[5];
+	sdev->nvm_function->nvm_size			= buf[6] << 8 | buf[7];
+
+	INIT_LIST_HEAD(&sdev->nvm_function->object_list);
+
+	/* Read object list */
+	addr = sdev->nvm_function->nvm_base_address;
+	rc = _bif_slave_read(sdev, addr, &object_type, 1);
+	if (rc) {
+		pr_err("Slave memory read failed, rc=%d\n", rc);
+		return rc;
+	}
+
+	/* Object type == 0x00 corresponds to the end of the object list. */
+	while (object_type != 0x00) {
+		object = kzalloc(sizeof(*object), GFP_KERNEL);
+		if (!object) {
+			pr_err("out of memory\n");
+			rc = -ENOMEM;
+			goto free_data;
+		}
+		list_add_tail(&object->list, &sdev->nvm_function->object_list);
+
+		rc = _bif_slave_read(sdev, addr + 1, buf + 1, 5);
+		if (rc) {
+			pr_err("Slave memory read of object header failed; addr=0x%04X, len=%d, rc=%d\n",
+				addr + 1, 5, rc);
+			goto free_data;
+		}
+
+		object->addr		= addr;
+		object->type		= object_type;
+		object->version		= buf[1];
+		object->manufacturer_id	= buf[2] << 8 | buf[3];
+		object->length		= buf[4] << 8 | buf[5];
+
+		if ((object->addr + object->length)
+		    > (sdev->nvm_function->nvm_base_address
+				+ sdev->nvm_function->nvm_size)) {
+			pr_warn("warning: BIF slave object is not formatted correctly; NVM base=0x%04X, NVM len=%d, object addr=0x%04X, object len=%d\n",
+				sdev->nvm_function->nvm_base_address,
+				sdev->nvm_function->nvm_size,
+				object->addr,
+				object->length);
+			/* Limit object size to remaining NVM size. */
+			object->length = sdev->nvm_function->nvm_size
+				+ sdev->nvm_function->nvm_base_address
+				- object->addr;
+		}
+
+		/* Object header + CRC takes up 8 bytes. */
+		data_len = object->length - 8;
+		object->data = kmalloc(data_len, GFP_KERNEL);
+		if (!object->data) {
+			pr_err("out of memory\n");
+			rc = -ENOMEM;
+			goto free_data;
+		}
+
+		rc = _bif_slave_read(sdev, addr + 6, object->data, data_len);
+		if (rc) {
+			pr_err("Slave memory read of object data failed; addr=0x%04X, len=%d, rc=%d\n",
+				addr + 6, data_len, rc);
+			goto free_data;
+		}
+
+		rc = _bif_slave_read(sdev, addr + 6 + data_len, buf, 3);
+		if (rc) {
+			pr_err("Slave memory read of object CRC failed; addr=0x%04X, len=%d, rc=%d\n",
+				addr + 6 + data_len, 3, rc);
+			goto free_data;
+		}
+
+		object->crc = buf[0] << 8 | buf[1];
+		object_type = buf[2];
+		sdev->nvm_function->object_count++;
+
+		crc = bif_object_crc_ccitt(object);
+		if (crc != object->crc)
+			pr_info("BIF object at addr=0x%04X has invalid CRC; crc calc=0x%04X, crc exp=0x%04X\n",
+				object->addr, crc, object->crc);
+
+		addr += object->length;
+	}
+
+	return rc;
+
+free_data:
+	list_for_each_entry_safe(object, temp,
+				&sdev->nvm_function->object_list, list) {
+		list_del(&object->list);
+		kfree(object->data);
+		kfree(object);
+	}
+	kfree(sdev->nvm_function);
+	sdev->nvm_function = NULL;
+	return rc;
+}
+
+static int bif_parse_slave_data(struct bif_slave_dev *sdev)
+{
+	int rc = 0;
+	u8 buf[10];
+	u8 *func_buf;
+	struct bif_ddb_l2_data *func;
+	int function_count, i;
+
+	rc = _bif_slave_read(sdev, BIF_DDB_L1_BASE_ADDR, buf, 10);
+	if (rc) {
+		pr_err("DDB L1 data read failed, rc=%d\n", rc);
+		return rc;
+	}
+
+	sdev->l1_data.revision		= buf[0];
+	sdev->l1_data.level		= buf[1];
+	sdev->l1_data.device_class	= buf[2] << 8 | buf[3];
+	sdev->l1_data.manufacturer_id	= buf[4] << 8 | buf[5];
+	sdev->l1_data.product_id	= buf[6] << 8 | buf[7];
+	sdev->l1_data.length		= buf[8] << 8 | buf[9];
+
+	function_count = sdev->l1_data.length / 4;
+	if (sdev->l1_data.length % 4) {
+		pr_err("Function directory length=%d is invalid\n",
+				sdev->l1_data.length);
+		return -EPROTO;
+	}
+
+	/* No DDB L2 function directory */
+	if (function_count == 0)
+		return 0;
+
+	func_buf = kmalloc(sdev->l1_data.length, GFP_KERNEL);
+	if (!func_buf) {
+		pr_err("out of memory\n");
+		return -ENOMEM;
+	}
+
+	sdev->function_directory = kzalloc(
+		function_count * sizeof(struct bif_ddb_l2_data), GFP_KERNEL);
+	if (!sdev->function_directory) {
+		pr_err("out of memory\n");
+		kfree(func_buf);
+		return -ENOMEM;
+	}
+
+	rc = _bif_slave_read(sdev, BIF_DDB_L2_BASE_ADDR, func_buf,
+				sdev->l1_data.length);
+	if (rc) {
+		pr_err("DDB L2 data read failed, rc=%d\n", rc);
+		goto done;
+	}
+
+	for (i = 0; i < function_count; i++) {
+		func = &sdev->function_directory[i];
+		func->function_type	= func_buf[i * 4];
+		func->function_version	= func_buf[i * 4 + 1];
+		func->function_pointer	= func_buf[i * 4 + 2] << 8
+					  | func_buf[i * 4 + 3];
+		rc = bif_initialize_protocol_function(sdev, func);
+		if (rc)
+			goto done;
+		rc = bif_initialize_slave_control_function(sdev, func);
+		if (rc)
+			goto done;
+		rc = bif_initialize_nvm_function(sdev, func);
+		if (rc)
+			goto done;
+	}
+done:
+	kfree(func_buf);
+	return rc;
+}
+
+static int bif_add_secondary_slaves(struct bif_slave_dev *primary_slave)
+{
+	int rc = 0;
+	int data_len, i;
+	u16 crc;
+	struct bif_slave_dev *sdev;
+	struct bif_object *object;
+
+	list_for_each_entry(object, &primary_slave->nvm_function->object_list,
+				list) {
+		if (object->type != BIF_OBJ_SEC_SLAVE)
+			continue;
+
+		data_len = object->length - 8;
+		if (data_len % BIF_UNIQUE_ID_BYTE_LENGTH) {
+			pr_info("Invalid secondary slave object found, addr=0x%04X, data len=%d\n",
+				object->addr, data_len);
+			continue;
+		}
+
+		crc = bif_object_crc_ccitt(object);
+		if (crc != object->crc) {
+			pr_info("BIF object at addr=0x%04X has invalid CRC; crc calc=0x%04X, crc exp=0x%04X\n",
+				object->addr, crc, object->crc);
+			continue;
+		}
+
+		for (i = 0; i < data_len / BIF_UNIQUE_ID_BYTE_LENGTH; i++) {
+			sdev = bif_add_slave(primary_slave->bdev);
+			if (IS_ERR(sdev)) {
+				rc = PTR_ERR(sdev);
+				pr_err("bif_add_slave failed, rc=%d\n", rc);
+				return rc;
+			}
+			memcpy(sdev->unique_id,
+				&object->data[i * BIF_UNIQUE_ID_BYTE_LENGTH],
+				BIF_UNIQUE_ID_BYTE_LENGTH);
+			sdev->unique_id_bits_known = BIF_UNIQUE_ID_BIT_LENGTH;
+
+			rc = bif_select_slave(sdev);
+			if (rc) {
+				pr_err("Could not select slave, rc=%d\n", rc);
+				goto free_slave;
+			}
+
+			rc = bif_is_slave_selected(sdev->bdev);
+			if (rc < 0) {
+				pr_err("Transaction failed, rc=%d\n", rc);
+				goto free_slave;
+			} else if (rc == 1) {
+				sdev->present = true;
+				sdev->bdev->selected_sdev = sdev;
+			} else {
+				sdev->present = false;
+				sdev->bdev->selected_sdev = NULL;
+			}
+		}
+	}
+
+	return rc;
+
+free_slave:
+	bif_remove_slave(sdev);
+	return rc;
+}
+
+/*
+ * Performs UID search to identify all slaves attached to the bus. Assumes that
+ * all necessary locks are held.
+ */
+static int bif_perform_uid_search(struct bif_ctrl_dev *bdev)
+{
+	struct bif_slave_dev *sdev;
+	struct bif_slave_dev *new_slave;
+	bool resp[2], resp_dilc;
+	int i;
+	int rc = 0;
+	u8 cmd_probe[2] = {BIF_CMD_DIP0, BIF_CMD_DIP1};
+	u8 cmd_enter[2] = {BIF_CMD_DIE0, BIF_CMD_DIE1};
+
+	/*
+	 * Iterate over all partially known UIDs adding new ones as they are
+	 * found.
+	 */
+	list_for_each_entry(sdev, &bif_sdev_list, list) {
+		/* Skip slaves with fully known UIDs. */
+		if (sdev->unique_id_bits_known == BIF_UNIQUE_ID_BIT_LENGTH
+		    || sdev->bdev != bdev)
+			continue;
+
+		/* Begin a new UID search. */
+		rc = bdev->desc->ops->bus_transaction(bdev, BIF_TRANS_BC,
+							BIF_CMD_DISS);
+		if (rc) {
+			pr_err("bus_transaction failed, rc=%d\n", rc);
+			return rc;
+		}
+
+		/* Step through all known UID bits (MSB to LSB). */
+		for (i = 0; i < sdev->unique_id_bits_known; i++) {
+			rc = bdev->desc->ops->bus_transaction(bdev,
+				BIF_TRANS_BC,
+				cmd_enter[get_uid_bit(sdev->unique_id, i)]);
+			if (rc) {
+				pr_err("bus_transaction failed, rc=%d\n", rc);
+				return rc;
+			}
+		}
+
+		/* Step through unknown UID bits. */
+		for (i = sdev->unique_id_bits_known;
+				i < BIF_UNIQUE_ID_BIT_LENGTH; i++) {
+			rc = bdev->desc->ops->bus_transaction_query(bdev,
+				BIF_TRANS_BC, cmd_probe[0], &resp[0]);
+			if (rc) {
+				pr_err("bus_transaction failed, rc=%d\n", rc);
+				return rc;
+			}
+
+			rc = bdev->desc->ops->bus_transaction_query(bdev,
+				BIF_TRANS_BC, cmd_probe[1], &resp[1]);
+			if (rc) {
+				pr_err("bus_transaction failed, rc=%d\n", rc);
+				return rc;
+			}
+
+			if (resp[0] && resp[1]) {
+				/* Create an entry for the new UID branch. */
+				new_slave = bif_add_slave(bdev);
+				if (IS_ERR(new_slave)) {
+					rc = PTR_ERR(new_slave);
+					pr_err("bif_add_slave failed, rc=%d\n",
+						rc);
+					return rc;
+				}
+				memcpy(new_slave->unique_id, sdev->unique_id,
+					BIF_UNIQUE_ID_BYTE_LENGTH);
+				new_slave->bdev = sdev->bdev;
+
+				set_uid_bit(sdev->unique_id, i, 0);
+				sdev->unique_id_bits_known = i + 1;
+
+				set_uid_bit(new_slave->unique_id, i, 1);
+				new_slave->unique_id_bits_known = i + 1;
+			} else if (resp[0]) {
+				set_uid_bit(sdev->unique_id, i, 0);
+				sdev->unique_id_bits_known = i + 1;
+			} else if (resp[1]) {
+				set_uid_bit(sdev->unique_id, i, 1);
+				sdev->unique_id_bits_known = i + 1;
+			} else {
+				pr_debug("no bus query response received\n");
+				rc = -ENXIO;
+				return rc;
+			}
+
+			rc = bdev->desc->ops->bus_transaction(bdev,
+				BIF_TRANS_BC, cmd_enter[resp[0] ? 0 : 1]);
+			if (rc) {
+				pr_err("bus_transaction failed, rc=%d\n", rc);
+				return rc;
+			}
+		}
+
+		rc = bdev->desc->ops->bus_transaction_query(bdev,
+			BIF_TRANS_BC, BIF_CMD_DILC, &resp_dilc);
+		if (rc) {
+			pr_err("bus_transaction failed, rc=%d\n", rc);
+			return rc;
+		}
+
+		if (resp_dilc) {
+			sdev->present = true;
+			sdev->bdev->selected_sdev = sdev;
+			rc = bif_parse_slave_data(sdev);
+		} else {
+			pr_err("Slave failed to respond to DILC bus command; its UID is thus unverified.\n");
+			sdev->unique_id_bits_known = 0;
+			rc = -ENXIO;
+			return rc;
+		}
+	}
+
+	return rc;
+}
+
+/*
+ * Removes slaves from the bif_sdev_list which have the same UID as previous
+ * slaves in the list.
+ */
+static int bif_remove_duplicate_slaves(struct bif_ctrl_dev *bdev)
+{
+	struct bif_slave_dev *sdev;
+	struct bif_slave_dev *last_slave;
+	struct bif_slave_dev *temp;
+
+	list_for_each_entry_safe(last_slave, temp, &bif_sdev_list, list) {
+		list_for_each_entry(sdev, &bif_sdev_list, list) {
+			if (last_slave == sdev) {
+				break;
+			} else if (memcmp(last_slave->unique_id,
+					sdev->unique_id,
+					BIF_UNIQUE_ID_BYTE_LENGTH) == 0) {
+				bif_remove_slave(last_slave);
+				break;
+			}
+		}
+	}
+
+	return 0;
+}
+
+static int bif_add_all_slaves(struct bif_ctrl_dev *bdev)
+{
+	struct bif_slave_dev *sdev;
+	int rc = 0;
+	int i;
+	bool has_slave = false, is_primary_slave = false;
+
+	mutex_lock(&bif_sdev_list_mutex);
+	mutex_lock(&bdev->mutex);
+
+	list_for_each_entry(sdev, &bif_sdev_list, list) {
+		if (sdev->bdev == bdev) {
+			has_slave = true;
+			break;
+		}
+	}
+
+	if (!has_slave) {
+		/* Create a single empty slave to start the search algorithm. */
+		sdev = bif_add_slave(bdev);
+		if (IS_ERR(sdev)) {
+			rc = PTR_ERR(sdev);
+			pr_err("bif_add_slave failed, rc=%d\n", rc);
+			goto out;
+		}
+
+		for (i = 0; i < BIF_TRANSACTION_RETRY_COUNT; i++) {
+			/* Attempt to select primary slave in battery pack. */
+			rc = bdev->desc->ops->bus_transaction(bdev,
+				BIF_TRANS_SDA, BIF_PRIMARY_SLAVE_DEV_ADR);
+			if (rc == 0)
+				break;
+		}
+		if (rc) {
+			pr_err("BIF bus_transaction failed, rc=%d\n", rc);
+			goto out;
+		}
+
+		/* Check if a slave is selected. */
+		rc = bif_is_slave_selected(bdev);
+		if (rc < 0) {
+			pr_err("BIF bus_transaction failed, rc=%d\n", rc);
+			goto out;
+		} else {
+			is_primary_slave = rc;
+		}
+	}
+
+	if (is_primary_slave) {
+		pr_debug("Using primary slave at DEV_ADR==0x%02X\n",
+			BIF_PRIMARY_SLAVE_DEV_ADR);
+		sdev->bdev->selected_sdev = sdev;
+		sdev->present = true;
+		sdev->slave_addr = BIF_PRIMARY_SLAVE_DEV_ADR;
+		rc = bif_parse_slave_data(sdev);
+		if (rc) {
+			pr_err("Failed to parse primary slave data, rc=%d\n",
+				rc);
+			goto out;
+		}
+		rc = bif_add_secondary_slaves(sdev);
+		if (rc) {
+			pr_err("Failed to add secondary slaves, rc=%d\n", rc);
+			goto out;
+		}
+	} else {
+		pr_debug("Falling back on full UID search.\n");
+		for (i = 0; i < BIF_TRANSACTION_RETRY_COUNT; i++) {
+			rc = bif_perform_uid_search(bdev);
+			if (rc == 0)
+				break;
+		}
+		if (rc) {
+			pr_debug("BIF UID search failed, rc=%d\n", rc);
+			goto out;
+		}
+	}
+
+	bif_remove_duplicate_slaves(bdev);
+
+	mutex_unlock(&bdev->mutex);
+	mutex_unlock(&bif_sdev_list_mutex);
+
+	return rc;
+
+out:
+	mutex_unlock(&bdev->mutex);
+	mutex_unlock(&bif_sdev_list_mutex);
+	pr_debug("BIF slave search failed, rc=%d\n", rc);
+	return rc;
+}
+
+static int bif_add_known_slave(struct bif_ctrl_dev *bdev, u8 slave_addr)
+{
+	struct bif_slave_dev *sdev;
+	int rc = 0;
+	int i;
+
+	for (i = 0; i < BIF_TRANSACTION_RETRY_COUNT; i++) {
+		/* Attempt to select the slave. */
+		rc = bdev->desc->ops->bus_transaction(bdev, BIF_TRANS_SDA,
+							slave_addr);
+		if (rc == 0)
+			break;
+	}
+	if (rc) {
+		pr_err("BIF bus_transaction failed, rc=%d\n", rc);
+		return rc;
+	}
+
+	/* Check if a slave is selected. */
+	rc = bif_is_slave_selected(bdev);
+	if (rc < 0) {
+		pr_err("BIF bus_transaction failed, rc=%d\n", rc);
+		return rc;
+	}
+
+	sdev = bif_add_slave(bdev);
+	if (IS_ERR(sdev)) {
+		rc = PTR_ERR(sdev);
+		pr_err("bif_add_slave failed, rc=%d\n", rc);
+		return rc;
+	}
+
+	sdev->bdev->selected_sdev = sdev;
+	sdev->present = true;
+	sdev->slave_addr = slave_addr;
+	rc = bif_parse_slave_data(sdev);
+	if (rc) {
+		pr_err("Failed to parse slave data, addr=0x%02X, rc=%d\n",
+			slave_addr, rc);
+		return rc;
+	}
+
+	return rc;
+}
+
+static int bif_add_known_slaves_from_dt(struct bif_ctrl_dev *bdev,
+					struct device_node *of_node)
+{
+	int len = 0;
+	int rc = 0, i;
+	u32 addr;
+	const __be32 *val;
+
+	mutex_lock(&bif_sdev_list_mutex);
+	mutex_lock(&bdev->mutex);
+
+	val = of_get_property(of_node, "qcom,known-device-addresses", &len);
+	len /= sizeof(u32);
+	if (val && len == 0) {
+		pr_err("qcom,known-device-addresses property is invalid\n");
+		rc = -EINVAL;
+		goto out;
+	}
+
+	for (i = 0; i < len; i++) {
+		addr = be32_to_cpup(val++);
+		if (addr == 0x00 || addr > 0xFF) {
+			rc = -EINVAL;
+			pr_err("qcom,known-device-addresses property contains invalid address=0x%X\n",
+				addr);
+			goto out;
+		}
+		rc = bif_add_known_slave(bdev, addr);
+		if (rc) {
+			pr_err("bif_add_known_slave() failed, rc=%d\n", rc);
+			goto out;
+		}
+	}
+
+out:
+	if (len > 0)
+		bif_remove_duplicate_slaves(bdev);
+
+	mutex_unlock(&bdev->mutex);
+	mutex_unlock(&bif_sdev_list_mutex);
+
+	return rc;
+}
+
+/*
+ * Programs a device address for the specified slave in order to simplify
+ * slave selection in the future.
+ */
+static int bif_assign_slave_dev_addr(struct bif_slave_dev *sdev, u8 dev_addr)
+{
+	int rc;
+	u16 addr;
+
+	if (!sdev->protocol_function) {
+		pr_err("Protocol function not present; cannot set device address.\n");
+		return -ENODEV;
+	}
+
+	addr = PROTOCOL_FUNC_DEV_ADR_ADDR(
+			sdev->protocol_function->protocol_pointer);
+
+	rc = _bif_slave_write(sdev, addr, &dev_addr, 1);
+	if (rc)
+		pr_err("Failed to set slave device address.\n");
+	else
+		sdev->slave_addr = dev_addr;
+
+	return rc;
+}
+
+/* Assigns a unique device address to all slaves which do not have one. */
+static int bif_assign_all_slaves_dev_addr(struct bif_ctrl_dev *bdev)
+{
+	struct bif_slave_dev *sdev;
+	struct bif_slave_dev *sibling;
+	bool duplicate;
+	int rc = 0;
+	u8 dev_addr, first_dev_addr;
+
+	mutex_lock(&bif_sdev_list_mutex);
+	mutex_lock(&bdev->mutex);
+
+	first_dev_addr = next_dev_addr;
+	/*
+	 * Iterate over all present slaves with fully known UIDs, assigning a
+	 * device address to each one which does not already have one.
+	 */
+	list_for_each_entry(sdev, &bif_sdev_list, list) {
+		/*
+		 * Skip slaves without known UIDs, which already have a device
+		 * address or which aren't present.
+		 */
+		if (sdev->unique_id_bits_known != BIF_UNIQUE_ID_BIT_LENGTH
+		    || sdev->slave_addr != 0x00 || !sdev->present)
+			continue;
+
+		do {
+			dev_addr = next_dev_addr;
+			duplicate = false;
+			list_for_each_entry(sibling, &bif_sdev_list, list) {
+				if (sibling->slave_addr == dev_addr) {
+					duplicate = true;
+					break;
+				}
+			}
+
+			next_dev_addr = dev_addr + 1;
+		} while (duplicate && (next_dev_addr != first_dev_addr));
+
+		if (next_dev_addr == first_dev_addr) {
+			pr_err("No more BIF slave device addresses available.\n");
+			rc = -ENODEV;
+			goto out;
+		}
+
+		rc =  bif_assign_slave_dev_addr(sdev, dev_addr);
+		if (rc) {
+			pr_err("Failed to set slave address.\n");
+			goto out;
+		}
+	}
+
+	mutex_unlock(&bdev->mutex);
+	mutex_unlock(&bif_sdev_list_mutex);
+
+	return rc;
+
+out:
+	mutex_unlock(&bdev->mutex);
+	mutex_unlock(&bif_sdev_list_mutex);
+	pr_err("BIF slave device address setting failed, rc=%d\n", rc);
+	return rc;
+}
+
+/**
+ * bdev_get_drvdata() - get the private BIF controller driver data
+ * @bdev:	BIF controller device pointer
+ */
+void *bdev_get_drvdata(struct bif_ctrl_dev *bdev)
+{
+	return bdev->driver_data;
+}
+EXPORT_SYMBOL(bdev_get_drvdata);
+
+static const char * const battery_label[] = {
+	"unknown",
+	"none",
+	"special 1",
+	"special 2",
+	"special 3",
+	"low cost",
+	"smart",
+};
+
+static const char *bif_get_battery_pack_type(int rid_ohm)
+{
+	const char *label = battery_label[0];
+
+	if (rid_ohm > BIF_BATT_RID_SMART_MAX)
+		label = battery_label[1];
+	else if (rid_ohm >= BIF_BATT_RID_SMART_MIN)
+		label = battery_label[6];
+	else if (rid_ohm >= BIF_BATT_RID_LOW_COST_MIN
+			&& rid_ohm <= BIF_BATT_RID_LOW_COST_MAX)
+		label = battery_label[5];
+	else if (rid_ohm >= BIF_BATT_RID_SPECIAL3_MIN
+			&& rid_ohm <= BIF_BATT_RID_SPECIAL3_MAX)
+		label = battery_label[4];
+	else if (rid_ohm >= BIF_BATT_RID_SPECIAL2_MIN
+			&& rid_ohm <= BIF_BATT_RID_SPECIAL2_MAX)
+		label = battery_label[3];
+	else if (rid_ohm >= BIF_BATT_RID_SPECIAL1_MIN
+			&& rid_ohm <= BIF_BATT_RID_SPECIAL1_MAX)
+		label = battery_label[2];
+
+	return label;
+}
+
+/**
+ * bif_ctrl_register() - register a BIF controller with the BIF framework
+ * @bif_desc:		Pointer to BIF controller descriptor
+ * @dev:		Device pointer of the BIF controller
+ * @driver_data:	Private driver data to associate with the BIF controller
+ * @of_node:		Pointer to the device tree node of the BIF controller
+ *
+ * Returns a BIF controller device pointer for the controller if registration
+ * is successful or an ERR_PTR if an error occurred.
+ */
+struct bif_ctrl_dev *bif_ctrl_register(struct bif_ctrl_desc *bif_desc,
+	struct device *dev, void *driver_data, struct device_node *of_node)
+{
+	struct bif_ctrl_dev *bdev = ERR_PTR(-EINVAL);
+	struct bif_slave_dev *sdev;
+	bool battery_present = false;
+	int rc, rid_ohm;
+
+	if (!bif_desc) {
+		pr_err("Invalid bif_desc specified\n");
+		return bdev;
+	} else if (!bif_desc->name) {
+		pr_err("BIF name missing\n");
+		return bdev;
+	} else if (!bif_desc->ops) {
+		pr_err("BIF operations missing\n");
+		return bdev;
+	} else if (!bif_desc->ops->bus_transaction
+			|| !bif_desc->ops->bus_transaction_query
+			|| !bif_desc->ops->bus_transaction_read
+			|| !bif_desc->ops->get_bus_state
+			|| !bif_desc->ops->set_bus_state) {
+		pr_err("BIF operation callback function(s) missing\n");
+		return bdev;
+	}
+
+	bdev = kzalloc(sizeof(struct bif_ctrl_dev), GFP_KERNEL);
+	if (bdev == NULL) {
+		pr_err("Memory allocation failed for bif_ctrl_dev\n");
+		return ERR_PTR(-ENOMEM);
+	}
+
+	mutex_init(&bdev->mutex);
+	INIT_LIST_HEAD(&bdev->list);
+	INIT_DELAYED_WORK(&bdev->enter_irq_mode_work, bif_enter_irq_mode_work);
+	bdev->desc			= bif_desc;
+	bdev->ctrl_dev			= dev;
+	bdev->driver_data		= driver_data;
+	bdev->irq_mode_delay_jiffies	= 2;
+
+	mutex_lock(&bif_ctrl_list_mutex);
+	list_add_tail(&bdev->list, &bif_ctrl_list);
+	mutex_unlock(&bif_ctrl_list_mutex);
+
+	rc = bif_add_all_slaves(bdev);
+	if (rc)
+		pr_debug("Search for all slaves failed, rc=%d\n", rc);
+	rc = bif_add_known_slaves_from_dt(bdev, of_node);
+	if (rc)
+		pr_err("Adding slaves based on device tree addressed failed, rc=%d.\n",
+			rc);
+	rc = bif_assign_all_slaves_dev_addr(bdev);
+	if (rc)
+		pr_err("Failed to set slave device address, rc=%d\n", rc);
+
+	bif_print_slaves();
+
+	if (bdev->desc->ops->get_battery_presence) {
+		rc = bdev->desc->ops->get_battery_presence(bdev);
+		if (rc < 0) {
+			pr_err("Could not determine battery presence, rc=%d\n",
+				rc);
+		} else {
+			battery_present = rc;
+			pr_info("Battery pack present = %c\n", rc ? 'Y' : 'N');
+		}
+	}
+
+	if (bdev->desc->ops->get_battery_rid) {
+		rid_ohm = bdev->desc->ops->get_battery_rid(bdev);
+		if (rid_ohm >= 0)
+			pr_info("Battery pack type = %s (Rid=%d ohm)\n",
+				bif_get_battery_pack_type(rid_ohm), rid_ohm);
+		else
+			pr_err("Could not read Rid, rc=%d\n", rid_ohm);
+	}
+
+	list_for_each_entry(sdev, &bif_sdev_list, list) {
+		if (sdev->present) {
+			battery_present = true;
+			break;
+		}
+	}
+
+	BLOCKING_INIT_NOTIFIER_HEAD(&bdev->bus_change_notifier);
+
+	if (battery_present) {
+		bdev->battery_present = true;
+		rc = blocking_notifier_call_chain(&bdev->bus_change_notifier,
+			BIF_BUS_EVENT_BATTERY_INSERTED, bdev);
+		if (rc)
+			pr_err("Call chain noification failed, rc=%d\n", rc);
+	}
+
+	return bdev;
+}
+EXPORT_SYMBOL(bif_ctrl_register);
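+
+/*
+ * Minimal usage sketch for a BIF controller driver probe (illustrative
+ * only; the "foo" names are hypothetical).  The descriptor must provide a
+ * name and ops populated with bus_transaction, bus_transaction_query,
+ * bus_transaction_read, get_bus_state and set_bus_state callbacks:
+ *
+ *	foo_bif_desc.name = "foo-bif";
+ *	foo_bif_desc.ops = &foo_bif_ops;
+ *	bdev = bif_ctrl_register(&foo_bif_desc, &pdev->dev, foo_chip,
+ *				 pdev->dev.of_node);
+ *	if (IS_ERR(bdev))
+ *		return PTR_ERR(bdev);
+ */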
+
+/**
+ * bif_ctrl_unregister() - unregisters a BIF controller
+ * @bdev:	BIF controller device pointer
+ */
+void bif_ctrl_unregister(struct bif_ctrl_dev *bdev)
+{
+	if (bdev) {
+		mutex_lock(&bif_ctrl_list_mutex);
+		list_del(&bdev->list);
+		mutex_unlock(&bif_ctrl_list_mutex);
+	}
+}
+EXPORT_SYMBOL(bif_ctrl_unregister);
diff --git a/drivers/bluetooth/bluetooth-power.c b/drivers/bluetooth/bluetooth-power.c
index 718df02..d7c69db 100644
--- a/drivers/bluetooth/bluetooth-power.c
+++ b/drivers/bluetooth/bluetooth-power.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2009-2010, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2009-2010, 2013 The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -20,9 +20,43 @@
 #include <linux/kernel.h>
 #include <linux/platform_device.h>
 #include <linux/rfkill.h>
+#include <linux/gpio.h>
+#include <linux/of_gpio.h>
+#include <linux/delay.h>
+
+static struct of_device_id ar3002_match_table[] = {
+	{	.compatible = "qca,ar3002" },
+	{}
+};
+
+static int bt_reset_gpio;
 
 static bool previous;
 
+static int bluetooth_power(int on)
+{
+	int rc;
+
+	pr_debug("%s  bt_gpio= %d\n", __func__, bt_reset_gpio);
+	if (on) {
+		rc = gpio_direction_output(bt_reset_gpio, 1);
+		if (rc) {
+			pr_err("%s: Unable to set direction\n", __func__);
+			return rc;
+		}
+		msleep(100);
+	} else {
+		gpio_set_value(bt_reset_gpio, 0);
+		rc = gpio_direction_input(bt_reset_gpio);
+		if (rc) {
+			pr_err("%s: Unable to set direction\n", __func__);
+			return rc;
+		}
+		msleep(100);
+	}
+	return 0;
+}
+
 static int bluetooth_toggle_radio(void *data, bool blocked)
 {
 	int ret = 0;
@@ -90,8 +124,36 @@
 	dev_dbg(&pdev->dev, "%s\n", __func__);
 
 	if (!pdev->dev.platform_data) {
-		dev_err(&pdev->dev, "platform data not initialized\n");
-		return -ENOSYS;
+		/*
+		 * Update the platform data if the device node exists as
+		 * part of the device tree.
+		 */
+		if (pdev->dev.of_node) {
+			pdev->dev.platform_data = bluetooth_power;
+		} else {
+			dev_err(&pdev->dev, "device node not set\n");
+			return -ENOSYS;
+		}
+	}
+	if (pdev->dev.of_node) {
+		bt_reset_gpio = of_get_named_gpio(pdev->dev.of_node,
+							"qca,bt-reset-gpio", 0);
+		if (bt_reset_gpio < 0) {
+			pr_err("bt-reset-gpio not available");
+			return bt_reset_gpio;
+		}
+	}
+
+	ret = gpio_request(bt_reset_gpio, "bt sys_rst_n");
+	if (ret) {
+		pr_err("%s: unable to request gpio %d (%d)\n",
+			__func__, bt_reset_gpio, ret);
+		return ret;
+	}
+
+	/* Keep the BT controller in reset until it is powered on */
+	ret = gpio_direction_output(bt_reset_gpio, 0);
+	if (ret) {
+		pr_err("%s: Unable to set direction\n", __func__);
+		return ret;
 	}
 
 	ret = bluetooth_power_rfkill_probe(pdev);
@@ -114,6 +176,7 @@
 	.driver = {
 		.name = "bt_power",
 		.owner = THIS_MODULE,
+		.of_match_table = ar3002_match_table,
 	},
 };
 
diff --git a/drivers/char/diag/diagchar_hdlc.c b/drivers/char/diag/diagchar_hdlc.c
index b94ea2f..2369c4d 100644
--- a/drivers/char/diag/diagchar_hdlc.c
+++ b/drivers/char/diag/diagchar_hdlc.c
@@ -1,4 +1,5 @@
-/* Copyright (c) 2008-2009, 2012, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2008-2009, 2012-2013, The Linux Foundation.
+ * All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -172,11 +173,14 @@
 	uint8_t src_byte;
 
 	int pkt_bnd = 0;
+	int msg_start;
 
 	if (hdlc && hdlc->src_ptr && hdlc->dest_ptr &&
 	    (hdlc->src_size - hdlc->src_idx > 0) &&
 	    (hdlc->dest_size - hdlc->dest_idx > 0)) {
 
+		msg_start = (hdlc->src_idx == 0) ? 1 : 0;
+
 		src_ptr = hdlc->src_ptr;
 		src_ptr = &src_ptr[hdlc->src_idx];
 		src_length = hdlc->src_size - hdlc->src_idx;
@@ -203,8 +207,16 @@
 				}
 			} else if (src_byte == CONTROL_CHAR) {
 				dest_ptr[len++] = src_byte;
-				pkt_bnd = 1;
+				/*
+				 * If this is the first byte in the message,
+				 * then it is part of the command. Otherwise,
+				 * consider it as the last byte of the
+				 * message.
+				 */
+				if (msg_start && i == 0 && src_length > 1)
+					continue;
 				i++;
+				pkt_bnd = 1;
 				break;
 			} else {
 				dest_ptr[len++] = src_byte;
diff --git a/drivers/char/diag/diagfwd.c b/drivers/char/diag/diagfwd.c
index 7f4edd1..2aca8cf 100644
--- a/drivers/char/diag/diagfwd.c
+++ b/drivers/char/diag/diagfwd.c
@@ -993,10 +993,18 @@
 
 	ret = diag_hdlc_decode(&hdlc);
 
-	if (hdlc.dest_idx < 3) {
-		pr_err("diag: Integer underflow in hdlc processing\n");
+	/*
+	 * If the message is 3 bytes or less in length then the message is
+	 * too short. A message needs at least 4 bytes: 2 bytes of CRC,
+	 * 1 byte for the trailing 0x7e of the hdlc encoding, and at least
+	 * 1 byte of payload.
+	 */
+	if (hdlc.dest_idx < 4) {
+		pr_err_ratelimited("diag: In %s, message is too short, len: %d, dest len: %d\n",
+			__func__, len, hdlc.dest_idx);
 		return;
 	}
+
 	if (ret) {
 		type = diag_process_apps_pkt(driver->hdlc_buf,
 							  hdlc.dest_idx - 3);
diff --git a/drivers/char/diag/diagfwd_hsic.c b/drivers/char/diag/diagfwd_hsic.c
index aa55578..616c498 100644
--- a/drivers/char/diag/diagfwd_hsic.c
+++ b/drivers/char/diag/diagfwd_hsic.c
@@ -45,6 +45,7 @@
 	struct diag_hsic_dev *hsic_struct = container_of(work,
 				struct diag_hsic_dev, diag_read_hsic_work);
 	int index = hsic_struct->id;
+	static DEFINE_RATELIMIT_STATE(rl, 10*HZ, 1);
 
 	if (!diag_hsic[index].hsic_ch) {
 		pr_err("DIAG in %s: diag_hsic[index].hsic_ch == 0\n", __func__);
@@ -103,7 +104,8 @@
 				diagmem_free(driver, buf_in_hsic,
 						index+POOL_TYPE_HSIC);
 
-				pr_err_ratelimited("diag: Error initiating HSIC read, err: %d\n",
+				if (__ratelimit(&rl))
+					pr_err("diag: Error initiating HSIC read, err: %d\n",
 					err);
 				/*
 				 * An error occurred, discontinue queuing
@@ -132,6 +134,7 @@
 {
 	int err = -2;
 	int index = (int)ctxt;
+	static DEFINE_RATELIMIT_STATE(rl, 10*HZ, 1);
 
 	if (!diag_hsic[index].hsic_ch) {
 		/*
@@ -164,7 +167,8 @@
 			if (err) {
 				diagmem_free(driver, buf, index +
 							POOL_TYPE_HSIC);
-				pr_err_ratelimited("diag: In %s, error calling diag_device_write, err: %d\n",
+				if (__ratelimit(&rl))
+					pr_err("diag: In %s, error calling diag_device_write, err: %d\n",
 					__func__, err);
 			}
 		}
diff --git a/drivers/coresight/Kconfig b/drivers/coresight/Kconfig
index c77df95..5e00570 100644
--- a/drivers/coresight/Kconfig
+++ b/drivers/coresight/Kconfig
@@ -24,6 +24,14 @@
 config HAVE_CORESIGHT_SINK
 	bool
 
+config CORESIGHT_CTI
+	bool "CoreSight Cross Trigger Interface driver"
+	help
+	  This driver provides support for the Cross Trigger Interface, which
+	  is used to pass cross trigger events (as inputs or outputs) from
+	  one hardware component to another. It can also be used to pass
+	  software generated events.
+
 config CORESIGHT_CSR
 	bool "CoreSight Slave Register driver"
 	help
@@ -32,6 +40,7 @@
 
 config CORESIGHT_TMC
 	bool "CoreSight Trace Memory Controller driver"
+	select CORESIGHT_CTI
 	select CORESIGHT_CSR
 	select HAVE_CORESIGHT_SINK
 	help
diff --git a/drivers/coresight/Makefile b/drivers/coresight/Makefile
index 8c73794..0595064 100644
--- a/drivers/coresight/Makefile
+++ b/drivers/coresight/Makefile
@@ -3,6 +3,7 @@
 #
 obj-$(CONFIG_CORESIGHT) += coresight.o
 obj-$(CONFIG_OF) += of_coresight.o
+obj-$(CONFIG_CORESIGHT_CTI) += coresight-cti.o
 obj-$(CONFIG_CORESIGHT_CSR) += coresight-csr.o
 obj-$(CONFIG_CORESIGHT_TMC) += coresight-tmc.o
 obj-$(CONFIG_CORESIGHT_TPIU) += coresight-tpiu.o
diff --git a/drivers/coresight/coresight-cti.c b/drivers/coresight/coresight-cti.c
new file mode 100644
index 0000000..e077edf
--- /dev/null
+++ b/drivers/coresight/coresight-cti.c
@@ -0,0 +1,481 @@
+/* Copyright (c) 2013, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/device.h>
+#include <linux/platform_device.h>
+#include <linux/io.h>
+#include <linux/err.h>
+#include <linux/slab.h>
+#include <linux/mutex.h>
+#include <linux/clk.h>
+#include <linux/of_coresight.h>
+#include <linux/coresight.h>
+#include <linux/coresight-cti.h>
+
+#include "coresight-priv.h"
+
+#define cti_writel(drvdata, val, off)	__raw_writel((val), drvdata->base + off)
+#define cti_readl(drvdata, off)		__raw_readl(drvdata->base + off)
+
+#define CTI_LOCK(drvdata)						\
+do {									\
+	mb();								\
+	cti_writel(drvdata, 0x0, CORESIGHT_LAR);			\
+} while (0)
+#define CTI_UNLOCK(drvdata)						\
+do {									\
+	cti_writel(drvdata, CORESIGHT_UNLOCK, CORESIGHT_LAR);		\
+	mb();								\
+} while (0)
+
+#define CTICONTROL		(0x000)
+#define CTIINTACK		(0x010)
+#define CTIAPPSET		(0x014)
+#define CTIAPPCLEAR		(0x018)
+#define CTIAPPPULSE		(0x01C)
+#define CTIINEN(n)		(0x020 + (n * 4))
+#define CTIOUTEN(n)		(0x0A0 + (n * 4))
+#define CTITRIGINSTATUS		(0x130)
+#define CTITRIGOUTSTATUS	(0x134)
+#define CTICHINSTATUS		(0x138)
+#define CTICHOUTSTATUS		(0x13C)
+#define CTIGATE			(0x140)
+#define ASICCTL			(0x144)
+#define ITCHINACK		(0xEDC)
+#define ITTRIGINACK		(0xEE0)
+#define ITCHOUT			(0xEE4)
+#define ITTRIGOUT		(0xEE8)
+#define ITCHOUTACK		(0xEEC)
+#define ITTRIGOUTACK		(0xEF0)
+#define ITCHIN			(0xEF4)
+#define ITTRIGIN		(0xEF8)
+
+#define CTI_MAX_TRIGGERS	(8)
+#define CTI_MAX_CHANNELS	(4)
+
+#define to_cti_drvdata(c) container_of(c, struct cti_drvdata, cti)
+
+struct cti_drvdata {
+	void __iomem			*base;
+	struct device			*dev;
+	struct coresight_device		*csdev;
+	struct clk			*clk;
+	struct mutex			mutex;
+	struct coresight_cti		cti;
+	int				refcnt;
+};
+
+static LIST_HEAD(cti_list);
+static DEFINE_MUTEX(cti_lock);
+
+static int cti_verify_bounds(int trig, int ch)
+{
+	if (trig >= CTI_MAX_TRIGGERS)
+		return -EINVAL;
+
+	if (ch >= CTI_MAX_CHANNELS)
+		return -EINVAL;
+
+	return 0;
+}
+
+static int cti_enable(struct cti_drvdata *drvdata)
+{
+	int ret;
+
+	ret = clk_prepare_enable(drvdata->clk);
+	if (ret)
+		return ret;
+
+	CTI_UNLOCK(drvdata);
+
+	cti_writel(drvdata, 0x1, CTICONTROL);
+
+	CTI_LOCK(drvdata);
+	return 0;
+}
+
+static void __cti_map_trigin(struct cti_drvdata *drvdata, int trig, int ch)
+{
+	uint32_t ctien;
+
+	CTI_UNLOCK(drvdata);
+
+	ctien = cti_readl(drvdata, CTIINEN(trig));
+	cti_writel(drvdata, (ctien | 0x1 << ch), CTIINEN(trig));
+
+	CTI_LOCK(drvdata);
+}
+
+int coresight_cti_map_trigin(struct coresight_cti *cti, int trig, int ch)
+{
+	struct cti_drvdata *drvdata;
+	int ret = 0;
+
+	if (IS_ERR_OR_NULL(cti))
+		return -EINVAL;
+
+	ret = cti_verify_bounds(trig, ch);
+	if (ret)
+		return ret;
+
+	drvdata = to_cti_drvdata(cti);
+
+	mutex_lock(&drvdata->mutex);
+	if (drvdata->refcnt == 0) {
+		ret = cti_enable(drvdata);
+		if (ret)
+			goto err;
+	}
+	drvdata->refcnt++;
+
+	__cti_map_trigin(drvdata, trig, ch);
+err:
+	mutex_unlock(&drvdata->mutex);
+	return ret;
+}
+EXPORT_SYMBOL(coresight_cti_map_trigin);
+
+static void __cti_map_trigout(struct cti_drvdata *drvdata, int trig, int ch)
+{
+	uint32_t ctien;
+
+	CTI_UNLOCK(drvdata);
+
+	ctien = cti_readl(drvdata, CTIOUTEN(trig));
+	cti_writel(drvdata, (ctien | 0x1 << ch), CTIOUTEN(trig));
+
+	CTI_LOCK(drvdata);
+}
+
+int coresight_cti_map_trigout(struct coresight_cti *cti, int trig, int ch)
+{
+	struct cti_drvdata *drvdata;
+	int ret = 0;
+
+	if (IS_ERR_OR_NULL(cti))
+		return -EINVAL;
+
+	ret = cti_verify_bounds(trig, ch);
+	if (ret)
+		return ret;
+
+	drvdata = to_cti_drvdata(cti);
+
+	mutex_lock(&drvdata->mutex);
+	if (drvdata->refcnt == 0) {
+		ret = cti_enable(drvdata);
+		if (ret)
+			goto err;
+	}
+	drvdata->refcnt++;
+
+	__cti_map_trigout(drvdata, trig, ch);
+err:
+	mutex_unlock(&drvdata->mutex);
+	return ret;
+}
+EXPORT_SYMBOL(coresight_cti_map_trigout);
+
+static void cti_disable(struct cti_drvdata *drvdata)
+{
+	CTI_UNLOCK(drvdata);
+
+	cti_writel(drvdata, 0x0, CTICONTROL);
+
+	CTI_LOCK(drvdata);
+}
+
+static void __cti_unmap_trigin(struct cti_drvdata *drvdata, int trig, int ch)
+{
+	uint32_t ctien;
+
+	CTI_UNLOCK(drvdata);
+
+	ctien = cti_readl(drvdata, CTIINEN(trig));
+	cti_writel(drvdata, (ctien & ~(0x1 << ch)), CTIINEN(trig));
+
+	CTI_LOCK(drvdata);
+}
+
+void coresight_cti_unmap_trigin(struct coresight_cti *cti, int trig, int ch)
+{
+	struct cti_drvdata *drvdata;
+
+	if (IS_ERR_OR_NULL(cti))
+		return;
+
+	if (cti_verify_bounds(trig, ch))
+		return;
+
+	drvdata = to_cti_drvdata(cti);
+
+	mutex_lock(&drvdata->mutex);
+	__cti_unmap_trigin(drvdata, trig, ch);
+
+	if (drvdata->refcnt == 1) {
+		cti_disable(drvdata);
+		clk_disable_unprepare(drvdata->clk);
+	}
+	drvdata->refcnt--;
+	mutex_unlock(&drvdata->mutex);
+}
+EXPORT_SYMBOL(coresight_cti_unmap_trigin);
+
+static void __cti_unmap_trigout(struct cti_drvdata *drvdata, int trig, int ch)
+{
+	uint32_t ctien;
+
+	CTI_UNLOCK(drvdata);
+
+	ctien = cti_readl(drvdata, CTIOUTEN(trig));
+	cti_writel(drvdata, (ctien & ~(0x1 << ch)), CTIOUTEN(trig));
+
+	CTI_LOCK(drvdata);
+}
+
+void coresight_cti_unmap_trigout(struct coresight_cti *cti, int trig, int ch)
+{
+	struct cti_drvdata *drvdata;
+
+	if (IS_ERR_OR_NULL(cti))
+		return;
+
+	if (cti_verify_bounds(trig, ch))
+		return;
+
+	drvdata = to_cti_drvdata(cti);
+
+	mutex_lock(&drvdata->mutex);
+	__cti_unmap_trigout(drvdata, trig, ch);
+
+	if (drvdata->refcnt == 1) {
+		cti_disable(drvdata);
+		clk_disable_unprepare(drvdata->clk);
+	}
+	drvdata->refcnt--;
+	mutex_unlock(&drvdata->mutex);
+}
+EXPORT_SYMBOL(coresight_cti_unmap_trigout);
+
+struct coresight_cti *coresight_cti_get(const char *name)
+{
+	struct coresight_cti *cti;
+
+	mutex_lock(&cti_lock);
+	list_for_each_entry(cti, &cti_list, link) {
+		if (!strncmp(cti->name, name, strlen(cti->name) + 1)) {
+			mutex_unlock(&cti_lock);
+			return cti;
+		}
+	}
+	mutex_unlock(&cti_lock);
+
+	return ERR_PTR(-EINVAL);
+}
+EXPORT_SYMBOL(coresight_cti_get);
+
+void coresight_cti_put(struct coresight_cti *cti)
+{
+}
+EXPORT_SYMBOL(coresight_cti_put);
+
+static ssize_t cti_store_map_trigin(struct device *dev,
+				    struct device_attribute *attr,
+				    const char *buf, size_t size)
+{
+	struct cti_drvdata *drvdata = dev_get_drvdata(dev->parent);
+	unsigned long val1, val2;
+	int ret;
+
+	if (sscanf(buf, "%lx %lx", &val1, &val2) != 2)
+		return -EINVAL;
+
+	ret = coresight_cti_map_trigin(&drvdata->cti, val1, val2);
+
+	if (ret)
+		return ret;
+	return size;
+}
+static DEVICE_ATTR(map_trigin, S_IWUSR, NULL, cti_store_map_trigin);
+
+static ssize_t cti_store_map_trigout(struct device *dev,
+				     struct device_attribute *attr,
+				     const char *buf, size_t size)
+{
+	struct cti_drvdata *drvdata = dev_get_drvdata(dev->parent);
+	unsigned long val1, val2;
+	int ret;
+
+	if (sscanf(buf, "%lx %lx", &val1, &val2) != 2)
+		return -EINVAL;
+
+	ret = coresight_cti_map_trigout(&drvdata->cti, val1, val2);
+
+	if (ret)
+		return ret;
+	return size;
+}
+static DEVICE_ATTR(map_trigout, S_IWUSR, NULL, cti_store_map_trigout);
+
+static ssize_t cti_store_unmap_trigin(struct device *dev,
+				      struct device_attribute *attr,
+				      const char *buf, size_t size)
+{
+	struct cti_drvdata *drvdata = dev_get_drvdata(dev->parent);
+	unsigned long val1, val2;
+
+	if (sscanf(buf, "%lx %lx", &val1, &val2) != 2)
+		return -EINVAL;
+
+	coresight_cti_unmap_trigin(&drvdata->cti, val1, val2);
+
+	return size;
+}
+static DEVICE_ATTR(unmap_trigin, S_IWUSR, NULL, cti_store_unmap_trigin);
+
+static ssize_t cti_store_unmap_trigout(struct device *dev,
+				       struct device_attribute *attr,
+				       const char *buf, size_t size)
+{
+	struct cti_drvdata *drvdata = dev_get_drvdata(dev->parent);
+	unsigned long val1, val2;
+
+	if (sscanf(buf, "%lx %lx", &val1, &val2) != 2)
+		return -EINVAL;
+
+	coresight_cti_unmap_trigout(&drvdata->cti, val1, val2);
+
+	return size;
+}
+static DEVICE_ATTR(unmap_trigout, S_IWUSR, NULL, cti_store_unmap_trigout);
+
+static struct attribute *cti_attrs[] = {
+	&dev_attr_map_trigin.attr,
+	&dev_attr_map_trigout.attr,
+	&dev_attr_unmap_trigin.attr,
+	&dev_attr_unmap_trigout.attr,
+	NULL,
+};
+
+static struct attribute_group cti_attr_grp = {
+	.attrs = cti_attrs,
+};
+
+static const struct attribute_group *cti_attr_grps[] = {
+	&cti_attr_grp,
+	NULL,
+};
+
+static int __devinit cti_probe(struct platform_device *pdev)
+{
+	int ret;
+	struct device *dev = &pdev->dev;
+	struct coresight_platform_data *pdata;
+	struct cti_drvdata *drvdata;
+	struct resource *res;
+	struct coresight_desc *desc;
+
+	if (pdev->dev.of_node) {
+		pdata = of_get_coresight_platform_data(dev, pdev->dev.of_node);
+		if (IS_ERR(pdata))
+			return PTR_ERR(pdata);
+		pdev->dev.platform_data = pdata;
+	}
+
+	drvdata = devm_kzalloc(dev, sizeof(*drvdata), GFP_KERNEL);
+	if (!drvdata)
+		return -ENOMEM;
+	/* Store the driver data pointer for use in exported functions */
+	drvdata->dev = &pdev->dev;
+	platform_set_drvdata(pdev, drvdata);
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (!res)
+		return -ENODEV;
+
+	drvdata->base = devm_ioremap(dev, res->start, resource_size(res));
+	if (!drvdata->base)
+		return -ENOMEM;
+
+	mutex_init(&drvdata->mutex);
+
+	drvdata->clk = devm_clk_get(dev, "core_clk");
+	if (IS_ERR(drvdata->clk))
+		return PTR_ERR(drvdata->clk);
+
+	ret = clk_set_rate(drvdata->clk, CORESIGHT_CLK_RATE_TRACE);
+	if (ret)
+		return ret;
+
+	mutex_lock(&cti_lock);
+	drvdata->cti.name = ((struct coresight_platform_data *)
+			     (pdev->dev.platform_data))->name;
+	list_add_tail(&drvdata->cti.link, &cti_list);
+	mutex_unlock(&cti_lock);
+
+	desc = devm_kzalloc(dev, sizeof(*desc), GFP_KERNEL);
+	if (!desc)
+		return -ENOMEM;
+	desc->type = CORESIGHT_DEV_TYPE_NONE;
+	desc->pdata = pdev->dev.platform_data;
+	desc->dev = &pdev->dev;
+	desc->groups = cti_attr_grps;
+	desc->owner = THIS_MODULE;
+	drvdata->csdev = coresight_register(desc);
+	if (IS_ERR(drvdata->csdev))
+		return PTR_ERR(drvdata->csdev);
+
+	dev_info(dev, "CTI initialized\n");
+	return 0;
+}
+
+static int __devexit cti_remove(struct platform_device *pdev)
+{
+	struct cti_drvdata *drvdata = platform_get_drvdata(pdev);
+
+	coresight_unregister(drvdata->csdev);
+	return 0;
+}
+
+static struct of_device_id cti_match[] = {
+	{.compatible = "arm,coresight-cti"},
+	{}
+};
+
+static struct platform_driver cti_driver = {
+	.probe          = cti_probe,
+	.remove         = __devexit_p(cti_remove),
+	.driver         = {
+		.name   = "coresight-cti",
+		.owner	= THIS_MODULE,
+		.of_match_table = cti_match,
+	},
+};
+
+static int __init cti_init(void)
+{
+	return platform_driver_register(&cti_driver);
+}
+module_init(cti_init);
+
+static void __exit cti_exit(void)
+{
+	platform_driver_unregister(&cti_driver);
+}
+module_exit(cti_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("CoreSight CTI driver");
diff --git a/drivers/coresight/coresight-etm.c b/drivers/coresight/coresight-etm.c
index 73c1499..2ae54ea 100644
--- a/drivers/coresight/coresight-etm.c
+++ b/drivers/coresight/coresight-etm.c
@@ -173,6 +173,8 @@
 #define ETM_REG_DUMP_VER_OFF		(4)
 #define ETM_REG_DUMP_VER		(1)
 
+#define CPMR_ETMCLKEN			(8)
+
 enum etm_addr_type {
 	ETM_ADDR_TYPE_NONE,
 	ETM_ADDR_TYPE_SINGLE,
@@ -318,11 +320,21 @@
 
 static void etm_set_pwrup(struct etm_drvdata *drvdata)
 {
+	uint32_t cpmr;
 	uint32_t etmpdcr;
 
-	etmpdcr = etm_readl_mm(drvdata, ETMPDCR);
-	etmpdcr |= BIT(3);
-	etm_writel_mm(drvdata, etmpdcr, ETMPDCR);
+	/*
+	 * For Krait, use cp15 CPMR_ETMCLKEN instead of ETMPDCR since ETMPDCR
+	 * is not supported for this purpose on Krait v4.
+	 */
+	if (cpu_is_krait()) {
+		asm volatile("mrc p15, 7, %0, c15, c0, 5" : "=r" (cpmr));
+		cpmr  |= CPMR_ETMCLKEN;
+		asm volatile("mcr p15, 7, %0, c15, c0, 5" : : "r" (cpmr));
+	} else {
+		etmpdcr = etm_readl_mm(drvdata, ETMPDCR);
+		etmpdcr |= BIT(3);
+		etm_writel_mm(drvdata, etmpdcr, ETMPDCR);
+	}
 	/* ensure pwrup completes before subsequent cp14 accesses */
 	mb();
 	isb();
@@ -330,14 +342,24 @@
 
 static void etm_clr_pwrup(struct etm_drvdata *drvdata)
 {
+	uint32_t cpmr;
 	uint32_t etmpdcr;
 
 	/* ensure pending cp14 accesses complete before clearing pwrup */
 	mb();
 	isb();
-	etmpdcr = etm_readl_mm(drvdata, ETMPDCR);
-	etmpdcr &= ~BIT(3);
-	etm_writel_mm(drvdata, etmpdcr, ETMPDCR);
+	/*
+	 * For Krait, use cp15 CPMR_ETMCLKEN instead of ETMPDCR since ETMPDCR
+	 * is not supported for this purpose on Krait v4.
+	 */
+	if (cpu_is_krait()) {
+		asm volatile("mrc p15, 7, %0, c15, c0, 5" : "=r" (cpmr));
+		cpmr  &= ~CPMR_ETMCLKEN;
+		asm volatile("mcr p15, 7, %0, c15, c0, 5" : : "r" (cpmr));
+	} else {
+		etmpdcr = etm_readl_mm(drvdata, ETMPDCR);
+		etmpdcr &= ~BIT(3);
+		etm_writel_mm(drvdata, etmpdcr, ETMPDCR);
+	}
 }
 
 static void etm_set_prog(struct etm_drvdata *drvdata)
diff --git a/drivers/coresight/coresight-stm.c b/drivers/coresight/coresight-stm.c
index bc72e02..1db499b 100644
--- a/drivers/coresight/coresight-stm.c
+++ b/drivers/coresight/coresight-stm.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -25,6 +25,7 @@
 #include <linux/delay.h>
 #include <linux/clk.h>
 #include <linux/bitmap.h>
+#include <linux/of.h>
 #include <linux/of_coresight.h>
 #include <linux/coresight.h>
 #include <linux/coresight-stm.h>
@@ -35,6 +36,10 @@
 #define stm_writel(drvdata, val, off)	__raw_writel((val), drvdata->base + off)
 #define stm_readl(drvdata, off)		__raw_readl(drvdata->base + off)
 
+#define stm_data_writeb(val, addr)	__raw_writeb_no_log(val, addr)
+#define stm_data_writew(val, addr)	__raw_writew_no_log(val, addr)
+#define stm_data_writel(val, addr)	__raw_writel_no_log(val, addr)
+
 #define STM_LOCK(drvdata)						\
 do {									\
 	mb();								\
@@ -85,8 +90,10 @@
 #define STM_USERSPACE_MAGIC1_VAL	(0xf0)
 #define STM_USERSPACE_MAGIC2_VAL	(0xf1)
 
-#define OST_START_TOKEN			(0x30)
-#define OST_VERSION			(0x1)
+#define OST_TOKEN_STARTSIMPLE		(0x10)
+#define OST_TOKEN_STARTBASE		(0x30)
+#define OST_VERSION_PROP		(1)
+#define OST_VERSION_MIPI1		(16)
 
 enum stm_pkt_type {
 	STM_PKT_TYPE_DATA	= 0x98,
@@ -133,6 +140,7 @@
 	struct channel_space	chs;
 	bool			enable;
 	DECLARE_BITMAP(entities, OST_ENTITY_MAX);
+	bool			write_64bit;
 };
 
 static struct stm_drvdata *stmdrvdata;
@@ -342,7 +350,7 @@
 	clear_bit(ch, drvdata->chs.bitmap);
 }
 
-static int stm_send(void *addr, const void *data, uint32_t size)
+static int stm_send_64bit(void *addr, const void *data, uint32_t size)
 {
 	uint64_t prepad = 0;
 	uint64_t postpad = 0;
@@ -376,7 +384,10 @@
 		size -= 8;
 	}
 
+	endoff = 0;
+
 	if (size) {
+		endoff = 8 - (uint8_t)size;
 		pad = (char *)&postpad;
 
 		while (size) {
@@ -386,12 +397,13 @@
 		*(volatile uint64_t __force *)addr = postpad;
 	}
 
-	return roundup(len + off, 8);
+	return len + off + endoff;
 }
 
-static int stm_trace_ost_header(unsigned long ch_addr, uint32_t options,
-				uint8_t entity_id, uint8_t proto_id,
-				const void *payload_data, uint32_t payload_size)
+static int stm_trace_ost_header_64bit(unsigned long ch_addr, uint32_t options,
+				      uint8_t entity_id, uint8_t proto_id,
+				      const void *payload_data,
+				      uint32_t payload_size)
 {
 	void *addr;
 	uint8_t prepad_size;
@@ -400,14 +412,92 @@
 
 	hdr = (char *)&header;
 
-	hdr[0] = OST_START_TOKEN;
-	hdr[1] = OST_VERSION;
+	hdr[0] = OST_TOKEN_STARTBASE;
+	hdr[1] = OST_VERSION_PROP;
 	hdr[2] = entity_id;
 	hdr[3] = proto_id;
 	prepad_size = (unsigned long)payload_data & 0x7;
 	*(uint32_t *)(hdr + 4) = (prepad_size << 24) | payload_size;
 
-	/* for 64bit writes, header is expected to be of the D32M, D32M */
+	/* for 64bit writes, header is expected to be D32M, D32M type */
+	options |= STM_OPTION_MARKED;
+	options &= ~STM_OPTION_TIMESTAMPED;
+	addr =  (void *)(ch_addr | stm_channel_off(STM_PKT_TYPE_DATA, options));
+
+	return stm_send_64bit(addr, &header, sizeof(header));
+}
+
+static int stm_trace_data_64bit(unsigned long ch_addr, uint32_t options,
+				const void *data, uint32_t size)
+{
+	void *addr;
+
+	options &= ~STM_OPTION_TIMESTAMPED;
+	addr = (void *)(ch_addr | stm_channel_off(STM_PKT_TYPE_DATA, options));
+
+	return stm_send_64bit(addr, data, size);
+}
+
+static int stm_trace_ost_tail_64bit(unsigned long ch_addr, uint32_t options)
+{
+	void *addr;
+	uint64_t tail = 0x0;
+
+	addr = (void *)(ch_addr | stm_channel_off(STM_PKT_TYPE_FLAG, options));
+
+	return stm_send_64bit(addr, &tail, sizeof(tail));
+}
+
+static int stm_send(void *addr, const void *data, uint32_t size)
+{
+	uint32_t len = size;
+
+	if (((unsigned long)data & 0x1) && (size >= 1)) {
+		stm_data_writeb(*(uint8_t *)data, addr);
+		data++;
+		size--;
+	}
+	if (((unsigned long)data & 0x2) && (size >= 2)) {
+		stm_data_writew(*(uint16_t *)data, addr);
+		data += 2;
+		size -= 2;
+	}
+
+	/* now we are 32bit aligned */
+	while (size >= 4) {
+		stm_data_writel(*(uint32_t *)data, addr);
+		data += 4;
+		size -= 4;
+	}
+
+	if (size >= 2) {
+		stm_data_writew(*(uint16_t *)data, addr);
+		data += 2;
+		size -= 2;
+	}
+	if (size >= 1) {
+		stm_data_writeb(*(uint8_t *)data, addr);
+		data++;
+		size--;
+	}
+
+	return len;
+}
+
+static int stm_trace_ost_header(unsigned long ch_addr, uint32_t options,
+				uint8_t entity_id, uint8_t proto_id,
+				const void *payload_data, uint32_t payload_size)
+{
+	void *addr;
+	uint32_t header;
+	char *hdr;
+
+	hdr = (char *)&header;
+
+	hdr[0] = OST_TOKEN_STARTSIMPLE;
+	hdr[1] = OST_VERSION_MIPI1;
+	hdr[2] = entity_id;
+	hdr[3] = proto_id;
+
+	/* header is expected to be D32M type */
 	options |= STM_OPTION_MARKED;
 	options &= ~STM_OPTION_TIMESTAMPED;
 	addr =  (void *)(ch_addr | stm_channel_off(STM_PKT_TYPE_DATA, options));
@@ -429,7 +519,7 @@
 static int stm_trace_ost_tail(unsigned long ch_addr, uint32_t options)
 {
 	void *addr;
-	uint64_t tail = 0x0;
+	uint32_t tail = 0x0;
 
 	addr = (void *)(ch_addr | stm_channel_off(STM_PKT_TYPE_FLAG, options));
 
@@ -448,15 +538,27 @@
 	ch = stm_channel_alloc(0);
 	ch_addr = (unsigned long)stm_channel_addr(drvdata, ch);
 
-	/* send the ost header */
-	len += stm_trace_ost_header(ch_addr, options, entity_id, proto_id, data,
-				    size);
+	if (drvdata->write_64bit) {
+		/* send the ost header */
+		len += stm_trace_ost_header_64bit(ch_addr, options, entity_id,
+						  proto_id, data, size);
 
-	/* send the payload data */
-	len += stm_trace_data(ch_addr, options, data, size);
+		/* send the payload data */
+		len += stm_trace_data_64bit(ch_addr, options, data, size);
 
-	/* send the ost tail */
-	len += stm_trace_ost_tail(ch_addr, options);
+		/* send the ost tail */
+		len += stm_trace_ost_tail_64bit(ch_addr, options);
+	} else {
+		/* send the ost header */
+		len += stm_trace_ost_header(ch_addr, options, entity_id,
+					    proto_id, data, size);
+
+		/* send the payload data */
+		len += stm_trace_data(ch_addr, options, data, size);
+
+		/* send the ost tail */
+		len += stm_trace_ost_tail(ch_addr, options);
+	}
 
 	/* we are done, free the channel */
 	stm_channel_free(ch);
@@ -744,6 +846,10 @@
 
 	bitmap_fill(drvdata->entities, OST_ENTITY_MAX);
 
+	if (pdev->dev.of_node)
+		drvdata->write_64bit = of_property_read_bool(pdev->dev.of_node,
+							"qcom,write-64bit");
+
 	desc = devm_kzalloc(dev, sizeof(*desc), GFP_KERNEL);
 	if (!desc)
 		return -ENOMEM;
diff --git a/drivers/coresight/coresight-tmc.c b/drivers/coresight/coresight-tmc.c
index 10eabca..0afb5a2 100644
--- a/drivers/coresight/coresight-tmc.c
+++ b/drivers/coresight/coresight-tmc.c
@@ -29,6 +29,7 @@
 #include <linux/of.h>
 #include <linux/of_coresight.h>
 #include <linux/coresight.h>
+#include <linux/coresight-cti.h>
 #include <linux/usb/usb_qdss.h>
 #include <mach/memory.h>
 #include <mach/sps.h>
@@ -136,6 +137,9 @@
 	struct miscdevice	miscdev;
 	struct clk		*clk;
 	spinlock_t		spinlock;
+	bool			reset_flush_race;
+	struct coresight_cti	*cti_flush;
+	struct coresight_cti	*cti_reset;
 	struct mutex		read_lock;
 	int			read_count;
 	bool			reading;
@@ -372,7 +376,7 @@
 	TMC_UNLOCK(drvdata);
 
 	tmc_writel(drvdata, TMC_MODE_CIRCULAR_BUFFER, TMC_MODE);
-	tmc_writel(drvdata, 0x133, TMC_FFCR);
+	tmc_writel(drvdata, 0x1133, TMC_FFCR);
 	tmc_writel(drvdata, drvdata->trigger_cntr, TMC_TRG);
 	__tmc_enable(drvdata);
 
@@ -401,7 +405,7 @@
 
 	tmc_writel(drvdata, drvdata->paddr, TMC_DBALO);
 	tmc_writel(drvdata, 0x0, TMC_DBAHI);
-	tmc_writel(drvdata, 0x133, TMC_FFCR);
+	tmc_writel(drvdata, 0x1133, TMC_FFCR);
 	tmc_writel(drvdata, drvdata->trigger_cntr, TMC_TRG);
 	__tmc_enable(drvdata);
 
@@ -430,8 +434,15 @@
 		return ret;
 
 	mutex_lock(&drvdata->usb_lock);
-	if (drvdata->config_type == TMC_CONFIG_TYPE_ETR) {
-		if (drvdata->out_mode == TMC_ETR_OUT_MODE_USB) {
+	if (drvdata->config_type == TMC_CONFIG_TYPE_ETB) {
+		coresight_cti_map_trigout(drvdata->cti_flush, 1, 0);
+		coresight_cti_map_trigin(drvdata->cti_reset, 0, 0);
+	} else if (drvdata->config_type == TMC_CONFIG_TYPE_ETR) {
+		if (drvdata->out_mode == TMC_ETR_OUT_MODE_MEM &&
+		    !drvdata->reset_flush_race) {
+			coresight_cti_map_trigout(drvdata->cti_flush, 3, 0);
+			coresight_cti_map_trigin(drvdata->cti_reset, 0, 0);
+		} else if (drvdata->out_mode == TMC_ETR_OUT_MODE_USB) {
 			drvdata->usbch = usb_qdss_open("qdss", drvdata,
 						       usb_notifier);
 			if (IS_ERR(drvdata->usbch)) {
@@ -440,6 +451,11 @@
 				goto err0;
 			}
 		}
+	} else {
+		if (mode == TMC_MODE_CIRCULAR_BUFFER) {
+			coresight_cti_map_trigout(drvdata->cti_flush, 1, 0);
+			coresight_cti_map_trigin(drvdata->cti_reset, 0, 0);
+		}
 	}
 
 	spin_lock_irqsave(&drvdata->spinlock, flags);
@@ -632,7 +648,6 @@
 static void tmc_disable(struct tmc_drvdata *drvdata, enum tmc_mode mode)
 {
 	unsigned long flags;
-	bool etr_bam_disable = false;
 
 	mutex_lock(&drvdata->usb_lock);
 	spin_lock_irqsave(&drvdata->spinlock, flags);
@@ -645,27 +660,32 @@
 		if (drvdata->out_mode == TMC_ETR_OUT_MODE_MEM)
 			__tmc_etr_disable_to_mem(drvdata);
 		else if (drvdata->out_mode == TMC_ETR_OUT_MODE_USB)
-			etr_bam_disable = true;
+			__tmc_etr_disable_to_bam(drvdata);
 	} else {
 		if (mode == TMC_MODE_CIRCULAR_BUFFER)
 			__tmc_etb_disable(drvdata);
 		else
 			__tmc_etf_disable(drvdata);
 	}
-out:
 	drvdata->enable = false;
 	spin_unlock_irqrestore(&drvdata->spinlock, flags);
 
-	if (etr_bam_disable) {
-		if (drvdata->config_type == TMC_CONFIG_TYPE_ETR) {
-			if (drvdata->out_mode == TMC_ETR_OUT_MODE_USB) {
-				spin_lock_irqsave(&drvdata->spinlock, flags);
-				__tmc_etr_disable_to_bam(drvdata);
-				spin_unlock_irqrestore(&drvdata->spinlock,
-						       flags);
-				tmc_etr_bam_disable(drvdata);
-				usb_qdss_close(drvdata->usbch);
-			}
+	if (drvdata->config_type == TMC_CONFIG_TYPE_ETB) {
+		coresight_cti_unmap_trigin(drvdata->cti_reset, 0, 0);
+		coresight_cti_unmap_trigout(drvdata->cti_flush, 1, 0);
+	} else if (drvdata->config_type == TMC_CONFIG_TYPE_ETR) {
+		if (drvdata->out_mode == TMC_ETR_OUT_MODE_MEM &&
+		    !drvdata->reset_flush_race) {
+			coresight_cti_unmap_trigin(drvdata->cti_reset, 0, 0);
+			coresight_cti_unmap_trigout(drvdata->cti_flush, 3, 0);
+		} else if (drvdata->out_mode == TMC_ETR_OUT_MODE_USB) {
+			tmc_etr_bam_disable(drvdata);
+			usb_qdss_close(drvdata->usbch);
+		}
+	} else {
+		if (mode == TMC_MODE_CIRCULAR_BUFFER) {
+			coresight_cti_unmap_trigin(drvdata->cti_reset, 0, 0);
+			coresight_cti_unmap_trigout(drvdata->cti_flush, 1, 0);
 		}
 	}
 	mutex_unlock(&drvdata->usb_lock);
@@ -673,6 +693,15 @@
 	clk_disable_unprepare(drvdata->clk);
 
 	dev_info(drvdata->dev, "TMC disabled\n");
+	return;
+out:
+	drvdata->enable = false;
+	spin_unlock_irqrestore(&drvdata->spinlock, flags);
+	mutex_unlock(&drvdata->usb_lock);
+
+	clk_disable_unprepare(drvdata->clk);
+
+	dev_info(drvdata->dev, "TMC disabled\n");
 }
 
 static void tmc_disable_sink(struct coresight_device *csdev)
@@ -707,6 +736,8 @@
 	} else if (drvdata->config_type == TMC_CONFIG_TYPE_ETR) {
 		if (drvdata->out_mode == TMC_ETR_OUT_MODE_MEM)
 			__tmc_etr_disable_to_mem(drvdata);
+		else if (drvdata->out_mode == TMC_ETR_OUT_MODE_USB)
+			__tmc_etr_disable_to_bam(drvdata);
 	} else {
 		mode = tmc_readl(drvdata, TMC_MODE);
 		if (mode == TMC_MODE_CIRCULAR_BUFFER)
@@ -940,7 +971,6 @@
 	struct tmc_drvdata *drvdata = dev_get_drvdata(dev->parent);
 	char str[10] = "";
 	unsigned long flags;
-	bool etr_bam_flag = false;
 	int ret;
 
 	if (strlen(buf) >= 10)
@@ -954,42 +984,52 @@
 			goto out;
 
 		spin_lock_irqsave(&drvdata->spinlock, flags);
-		if (drvdata->enable) {
-			__tmc_etr_disable_to_bam(drvdata);
-			__tmc_etr_enable_to_mem(drvdata);
-			etr_bam_flag = true;
+		if (!drvdata->enable) {
+			drvdata->out_mode = TMC_ETR_OUT_MODE_MEM;
+			spin_unlock_irqrestore(&drvdata->spinlock, flags);
+			goto out;
 		}
+		__tmc_etr_disable_to_bam(drvdata);
+		__tmc_etr_enable_to_mem(drvdata);
 		drvdata->out_mode = TMC_ETR_OUT_MODE_MEM;
 		spin_unlock_irqrestore(&drvdata->spinlock, flags);
 
-		if (etr_bam_flag) {
-			tmc_etr_bam_disable(drvdata);
-			usb_qdss_close(drvdata->usbch);
+		if (!drvdata->reset_flush_race) {
+			coresight_cti_map_trigout(drvdata->cti_flush, 3, 0);
+			coresight_cti_map_trigin(drvdata->cti_reset, 0, 0);
 		}
+
+		tmc_etr_bam_disable(drvdata);
+		usb_qdss_close(drvdata->usbch);
 	} else if (!strcmp(str, "usb")) {
 		if (drvdata->out_mode == TMC_ETR_OUT_MODE_USB)
 			goto out;
 
 		spin_lock_irqsave(&drvdata->spinlock, flags);
-		if (drvdata->enable) {
-			if (drvdata->reading) {
-				ret = -EBUSY;
-				goto err1;
-			}
-			__tmc_etr_disable_to_mem(drvdata);
-			etr_bam_flag = true;
+		if (!drvdata->enable) {
+			drvdata->out_mode = TMC_ETR_OUT_MODE_USB;
+			spin_unlock_irqrestore(&drvdata->spinlock, flags);
+			goto out;
 		}
+		if (drvdata->reading) {
+			ret = -EBUSY;
+			goto err1;
+		}
+		__tmc_etr_disable_to_mem(drvdata);
 		drvdata->out_mode = TMC_ETR_OUT_MODE_USB;
 		spin_unlock_irqrestore(&drvdata->spinlock, flags);
 
-		if (etr_bam_flag) {
-			drvdata->usbch = usb_qdss_open("qdss", drvdata,
-						       usb_notifier);
-			if (IS_ERR(drvdata->usbch)) {
-				dev_err(drvdata->dev, "usb_qdss_open failed\n");
-				ret = PTR_ERR(drvdata->usbch);
-				goto err0;
-			}
+		if (!drvdata->reset_flush_race) {
+			coresight_cti_unmap_trigin(drvdata->cti_reset, 0, 0);
+			coresight_cti_unmap_trigout(drvdata->cti_flush, 3, 0);
+		}
+
+		drvdata->usbch = usb_qdss_open("qdss", drvdata,
+					       usb_notifier);
+		if (IS_ERR(drvdata->usbch)) {
+			dev_err(drvdata->dev, "usb_qdss_open failed\n");
+			ret = PTR_ERR(drvdata->usbch);
+			goto err0;
 		}
 	}
 out:
@@ -1091,6 +1131,7 @@
 	static int count;
 	void *baddr;
 	struct msm_client_dump dump;
+	struct coresight_cti_data *ctidata;
 	struct coresight_desc *desc;
 
 	if (pdev->dev.of_node) {
@@ -1209,6 +1250,27 @@
 	}
 	count++;
 
+	if (pdev->dev.of_node) {
+		drvdata->reset_flush_race = of_property_read_bool(
+						pdev->dev.of_node,
+						"qcom,reset-flush-race");
+
+		ctidata = of_get_coresight_cti_data(dev, pdev->dev.of_node);
+		if (IS_ERR(ctidata)) {
+			dev_err(dev, "invalid cti data\n");
+		} else if (ctidata && ctidata->nr_ctis == 2) {
+			drvdata->cti_flush = coresight_cti_get(
+							ctidata->names[0]);
+			if (IS_ERR(drvdata->cti_flush))
+				dev_err(dev, "failed to get flush cti\n");
+
+			drvdata->cti_reset = coresight_cti_get(
+							ctidata->names[1]);
+			if (IS_ERR(drvdata->cti_reset))
+				dev_err(dev, "failed to get reset cti\n");
+		}
+	}
+
 	desc = devm_kzalloc(dev, sizeof(*desc), GFP_KERNEL);
 	if (!desc) {
 		ret = -ENOMEM;
diff --git a/drivers/coresight/of_coresight.c b/drivers/coresight/of_coresight.c
index a9d0182..1eccd09 100644
--- a/drivers/coresight/of_coresight.c
+++ b/drivers/coresight/of_coresight.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -16,6 +16,7 @@
 #include <linux/slab.h>
 #include <linux/of.h>
 #include <linux/coresight.h>
+#include <linux/coresight-cti.h>
 
 struct coresight_platform_data *of_get_coresight_platform_data(
 				struct device *dev, struct device_node *node)
@@ -97,3 +98,45 @@
 	return pdata;
 }
 EXPORT_SYMBOL_GPL(of_get_coresight_platform_data);
+
+struct coresight_cti_data *of_get_coresight_cti_data(
+				struct device *dev, struct device_node *node)
+{
+	int i, ret;
+	uint32_t ctis_len;
+	struct device_node *child_node;
+	struct coresight_cti_data *ctidata;
+
+	ctidata = devm_kzalloc(dev, sizeof(*ctidata), GFP_KERNEL);
+	if (!ctidata)
+		return ERR_PTR(-ENOMEM);
+
+	if (of_get_property(node, "coresight-ctis", &ctis_len))
+		ctidata->nr_ctis = ctis_len/sizeof(uint32_t);
+	else
+		return ERR_PTR(-EINVAL);
+
+	if (ctidata->nr_ctis) {
+		ctidata->names = devm_kzalloc(dev, ctidata->nr_ctis *
+					      sizeof(*ctidata->names),
+					      GFP_KERNEL);
+		if (!ctidata->names)
+			return ERR_PTR(-ENOMEM);
+
+		for (i = 0; i < ctidata->nr_ctis; i++) {
+			child_node = of_parse_phandle(node, "coresight-ctis",
+						      i);
+			if (!child_node)
+				return ERR_PTR(-EINVAL);
+
+			ret = of_property_read_string(child_node,
+						      "coresight-name",
+						      &ctidata->names[i]);
+			of_node_put(child_node);
+			if (ret)
+				return ERR_PTR(ret);
+		}
+	}
+	return ctidata;
+}
+EXPORT_SYMBOL(of_get_coresight_cti_data);
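+
+/*
+ * Illustrative device tree fragment for the properties parsed above (node
+ * names, unit addresses and CTI names are hypothetical):
+ *
+ *	tmc_etr: tmc@fc322000 {
+ *		...
+ *		coresight-ctis = <&cti_flush &cti_reset>;
+ *	};
+ *
+ *	cti_flush: cti@fc308000 {
+ *		...
+ *		coresight-name = "coresight-cti-flush";
+ *	};
+ */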
diff --git a/drivers/cpuidle/Kconfig b/drivers/cpuidle/Kconfig
index 78a666d..a76b689 100644
--- a/drivers/cpuidle/Kconfig
+++ b/drivers/cpuidle/Kconfig
@@ -18,3 +18,6 @@
 	bool
 	depends on CPU_IDLE && NO_HZ
 	default y
+
+config ARCH_NEEDS_CPU_IDLE_COUPLED
+	def_bool n
diff --git a/drivers/cpuidle/Makefile b/drivers/cpuidle/Makefile
index 5634f88..38c8f69 100644
--- a/drivers/cpuidle/Makefile
+++ b/drivers/cpuidle/Makefile
@@ -3,3 +3,4 @@
 #
 
 obj-y += cpuidle.o driver.o governor.o sysfs.o governors/
+obj-$(CONFIG_ARCH_NEEDS_CPU_IDLE_COUPLED) += coupled.o
diff --git a/drivers/cpuidle/coupled.c b/drivers/cpuidle/coupled.c
new file mode 100644
index 0000000..c24dda0
--- /dev/null
+++ b/drivers/cpuidle/coupled.c
@@ -0,0 +1,727 @@
+/*
+ * coupled.c - helper functions to enter the same idle state on multiple cpus
+ *
+ * Copyright (c) 2011 Google, Inc.
+ *
+ * Author: Colin Cross <ccross@android.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/cpu.h>
+#include <linux/cpuidle.h>
+#include <linux/mutex.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+
+#include "cpuidle.h"
+
+/**
+ * DOC: Coupled cpuidle states
+ *
+ * On some ARM SMP SoCs (OMAP4460, Tegra 2, and probably more), the
+ * cpus cannot be independently powered down, either due to
+ * sequencing restrictions (on Tegra 2, cpu 0 must be the last to
+ * power down), or due to HW bugs (on OMAP4460, a cpu powering up
+ * will corrupt the gic state unless the other cpu runs a work
+ * around).  Each cpu has a power state that it can enter without
+ * coordinating with the other cpu (usually Wait For Interrupt, or
+ * WFI), and one or more "coupled" power states that affect blocks
+ * shared between the cpus (L2 cache, interrupt controller, and
+ * sometimes the whole SoC).  Entering a coupled power state must
+ * be tightly controlled on both cpus.
+ *
+ * This file implements a solution, where each cpu will wait in the
+ * WFI state until all cpus are ready to enter a coupled state, at
+ * which point the coupled state function will be called on all
+ * cpus at approximately the same time.
+ *
+ * Once all cpus are ready to enter idle, they are woken by an smp
+ * cross call.  At this point, there is a chance that one of the
+ * cpus will find work to do, and choose not to enter idle.  A
+ * final pass is needed to guarantee that all cpus will call the
+ * power state enter function at the same time.  During this pass,
+ * each cpu will increment the ready counter, and continue once the
+ * ready counter matches the number of online coupled cpus.  If any
+ * cpu exits idle, the other cpus will decrement their counter and
+ * retry.
+ *
+ * requested_state stores the deepest coupled idle state each cpu
+ * is ready for.  It is assumed that the states are indexed from
+ * shallowest (highest power, lowest exit latency) to deepest
+ * (lowest power, highest exit latency).  The requested_state
+ * variable is not locked.  It is only written from the cpu that
+ * it stores (or by the on/offlining cpu if that cpu is offline),
+ * and only read after all the cpus are ready for the coupled idle
+ * state and are no longer updating it.
+ *
+ * Two counters are packed into the atomic ready_waiting_counts
+ * value: the waiting count tracks the number of cpus that are in
+ * the waiting loop, in the ready loop, or in the coupled idle state,
+ * and the ready count tracks the number of cpus that are in the
+ * ready loop or in the coupled idle state.  online_count tracks the
+ * number of cpus in the coupled set that are currently or soon will
+ * be online.
+ *
+ * To use coupled cpuidle states, a cpuidle driver must (a minimal setup
+ * sketch follows this comment block):
+ *
+ *    Set struct cpuidle_device.coupled_cpus to the mask of all
+ *    coupled cpus, usually the same as cpu_possible_mask if all cpus
+ *    are part of the same cluster.  The coupled_cpus mask must be
+ *    set in the struct cpuidle_device for each cpu.
+ *
+ *    Set struct cpuidle_device.safe_state_index to the index of a state
+ *    that is not a coupled state.  This is usually WFI.
+ *
+ *    Set CPUIDLE_FLAG_COUPLED in struct cpuidle_state.flags for each
+ *    state that affects multiple cpus.
+ *
+ *    Provide a struct cpuidle_state.enter function for each state
+ *    that affects multiple cpus.  This function is guaranteed to be
+ *    called on all cpus at approximately the same time.  The driver
+ *    should ensure that the cpus all abort together if any cpu tries
+ *    to abort once the function is called.  The function should return
+ *    with interrupts still disabled.
+ */
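+
+/*
+ * Minimal setup sketch (illustrative only; the "foo" names are
+ * hypothetical).  State 0 is the safe WFI state and state 1 is the
+ * coupled cluster state:
+ *
+ *	drv->states[1].enter = foo_enter_coupled;
+ *	drv->states[1].flags |= CPUIDLE_FLAG_COUPLED;
+ *
+ *	for_each_possible_cpu(cpu) {
+ *		dev = &per_cpu(foo_cpuidle_devices, cpu);
+ *		dev->coupled_cpus = *cpu_possible_mask;
+ *		dev->safe_state_index = 0;
+ *	}
+ */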
+
+/**
+ * struct cpuidle_coupled - data for set of cpus that share a coupled idle state
+ * @coupled_cpus: mask of cpus that are part of the coupled set
+ * @requested_state: array of requested states for cpus in the coupled set
+ * @ready_waiting_counts: combined count of cpus  in ready or waiting loops
+ * @online_count: count of cpus that are online
+ * @refcnt: reference count of cpuidle devices that are using this struct
+ * @prevent: flag to prevent coupled idle while a cpu is hotplugging
+ */
+struct cpuidle_coupled {
+	cpumask_t coupled_cpus;
+	int requested_state[NR_CPUS];
+	atomic_t ready_waiting_counts;
+	int online_count;
+	int refcnt;
+	int prevent;
+};
+
+#define WAITING_BITS 16
+#define MAX_WAITING_CPUS (1 << WAITING_BITS)
+#define WAITING_MASK (MAX_WAITING_CPUS - 1)
+#define READY_MASK (~WAITING_MASK)
+
+#define CPUIDLE_COUPLED_NOT_IDLE	(-1)
+
+static DEFINE_MUTEX(cpuidle_coupled_lock);
+static DEFINE_PER_CPU(struct call_single_data, cpuidle_coupled_poke_cb);
+
+/*
+ * The cpuidle_coupled_poked_mask mask is used to avoid calling
+ * __smp_call_function_single with the per cpu call_single_data struct already
+ * in use.  This prevents a deadlock where two cpus are waiting for each other's
+ * call_single_data struct to be available
+ */
+static cpumask_t cpuidle_coupled_poked_mask;
+
+/**
+ * cpuidle_coupled_parallel_barrier - synchronize all online coupled cpus
+ * @dev: cpuidle_device of the calling cpu
+ * @a:   atomic variable to hold the barrier
+ *
+ * No caller to this function will return from this function until all online
+ * cpus in the same coupled group have called this function.  Once any caller
+ * has returned from this function, the barrier is immediately available for
+ * reuse.
+ *
+ * The atomic variable a must be initialized to 0 before any cpu calls
+ * this function, and will be reset to 0 before any cpu returns from this
+ * function.
+ *
+ * Must only be called from within a coupled idle state handler
+ * (state.enter when state.flags has CPUIDLE_FLAG_COUPLED set).
+ *
+ * Provides full smp barrier semantics before and after calling.
+ */
+void cpuidle_coupled_parallel_barrier(struct cpuidle_device *dev, atomic_t *a)
+{
+	int n = dev->coupled->online_count;
+
+	smp_mb__before_atomic_inc();
+	atomic_inc(a);
+
+	while (atomic_read(a) < n)
+		cpu_relax();
+
+	if (atomic_inc_return(a) == n * 2) {
+		atomic_set(a, 0);
+		return;
+	}
+
+	while (atomic_read(a) > n)
+		cpu_relax();
+}
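+
+/*
+ * Usage sketch (illustrative only; the "foo" names are hypothetical) from
+ * within a coupled state enter handler:
+ *
+ *	static atomic_t foo_barrier = ATOMIC_INIT(0);
+ *
+ *	static int foo_enter_coupled(struct cpuidle_device *dev,
+ *				     struct cpuidle_driver *drv, int index)
+ *	{
+ *		foo_prepare_cpu_power_down(dev->cpu);
+ *		cpuidle_coupled_parallel_barrier(dev, &foo_barrier);
+ *		foo_cluster_power_down();
+ *		...
+ *	}
+ */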
+
+/**
+ * cpuidle_state_is_coupled - check if a state is part of a coupled set
+ * @dev: struct cpuidle_device for the current cpu
+ * @drv: struct cpuidle_driver for the platform
+ * @state: index of the target state in drv->states
+ *
+ * Returns true if the target state is coupled with cpus besides this one
+ */
+bool cpuidle_state_is_coupled(struct cpuidle_device *dev,
+	struct cpuidle_driver *drv, int state)
+{
+	return drv->states[state].flags & CPUIDLE_FLAG_COUPLED;
+}
+
+/**
+ * cpuidle_coupled_set_ready - mark a cpu as ready
+ * @coupled: the struct coupled that contains the current cpu
+ */
+static inline void cpuidle_coupled_set_ready(struct cpuidle_coupled *coupled)
+{
+	atomic_add(MAX_WAITING_CPUS, &coupled->ready_waiting_counts);
+}
+
+/**
+ * cpuidle_coupled_set_not_ready - mark a cpu as not ready
+ * @coupled: the struct coupled that contains the current cpu
+ *
+ * Decrements the ready counter, unless the ready (and thus the waiting) counter
+ * is equal to the number of online cpus.  Prevents a race where one cpu
+ * decrements the waiting counter and then re-increments it just before another
+ * cpu has decremented its ready counter, leading to the ready counter going
+ * down from the number of online cpus without going through the coupled idle
+ * state.
+ *
+ * Returns 0 if the counter was decremented successfully, -EINVAL if the ready
+ * counter was equal to the number of online cpus.
+ */
+static
+inline int cpuidle_coupled_set_not_ready(struct cpuidle_coupled *coupled)
+{
+	int all;
+	int ret;
+
+	all = coupled->online_count | (coupled->online_count << WAITING_BITS);
+	ret = atomic_add_unless(&coupled->ready_waiting_counts,
+		-MAX_WAITING_CPUS, all);
+
+	return ret ? 0 : -EINVAL;
+}
+
+/**
+ * cpuidle_coupled_no_cpus_ready - check if no cpus in a coupled set are ready
+ * @coupled: the struct coupled that contains the current cpu
+ *
+ * Returns true if all of the cpus in a coupled set are out of the ready loop.
+ */
+static inline int cpuidle_coupled_no_cpus_ready(struct cpuidle_coupled *coupled)
+{
+	int r = atomic_read(&coupled->ready_waiting_counts) >> WAITING_BITS;
+	return r == 0;
+}
+
+/**
+ * cpuidle_coupled_cpus_ready - check if all cpus in a coupled set are ready
+ * @coupled: the struct coupled that contains the current cpu
+ *
+ * Returns true if all cpus coupled to this target state are in the ready loop
+ */
+static inline bool cpuidle_coupled_cpus_ready(struct cpuidle_coupled *coupled)
+{
+	int r = atomic_read(&coupled->ready_waiting_counts) >> WAITING_BITS;
+	return r == coupled->online_count;
+}
+
+/**
+ * cpuidle_coupled_cpus_waiting - check if all cpus in a coupled set are waiting
+ * @coupled: the struct coupled that contains the current cpu
+ *
+ * Returns true if all cpus coupled to this target state are in the wait loop
+ */
+static inline bool cpuidle_coupled_cpus_waiting(struct cpuidle_coupled *coupled)
+{
+	int w = atomic_read(&coupled->ready_waiting_counts) & WAITING_MASK;
+	return w == coupled->online_count;
+}
+
+/**
+ * cpuidle_coupled_no_cpus_waiting - check if no cpus in coupled set are waiting
+ * @coupled: the struct coupled that contains the current cpu
+ *
+ * Returns true if all of the cpus in a coupled set are out of the waiting loop.
+ */
+static inline int cpuidle_coupled_no_cpus_waiting(struct cpuidle_coupled *coupled)
+{
+	int w = atomic_read(&coupled->ready_waiting_counts) & WAITING_MASK;
+	return w == 0;
+}
+
+/**
+ * cpuidle_coupled_get_state - determine the deepest idle state
+ * @dev: struct cpuidle_device for this cpu
+ * @coupled: the struct coupled that contains the current cpu
+ *
+ * Returns the deepest idle state that all coupled cpus can enter
+ */
+static inline int cpuidle_coupled_get_state(struct cpuidle_device *dev,
+		struct cpuidle_coupled *coupled)
+{
+	int i;
+	int state = INT_MAX;
+
+	/*
+	 * Read barrier ensures that read of requested_state is ordered after
+	 * reads of ready_count.  Matches the write barriers
+	 * cpuidle_set_state_waiting.
+	 */
+	smp_rmb();
+
+	for_each_cpu_mask(i, coupled->coupled_cpus)
+		if (cpu_online(i) && coupled->requested_state[i] < state)
+			state = coupled->requested_state[i];
+
+	return state;
+}
+
+static void cpuidle_coupled_poked(void *info)
+{
+	int cpu = (unsigned long)info;
+	cpumask_clear_cpu(cpu, &cpuidle_coupled_poked_mask);
+}
+
+/**
+ * cpuidle_coupled_poke - wake up a cpu that may be waiting
+ * @cpu: target cpu
+ *
+ * Ensures that the target cpu exits its waiting idle state (if it is in it)
+ * and will see updates to waiting_count before it re-enters its waiting idle
+ * state.
+ *
+ * If cpuidle_coupled_poked_mask is already set for the target cpu, that cpu
+ * either has or will soon have a pending IPI that will wake it out of idle,
+ * or it is currently processing the IPI and is not in idle.
+ */
+static void cpuidle_coupled_poke(int cpu)
+{
+	struct call_single_data *csd = &per_cpu(cpuidle_coupled_poke_cb, cpu);
+
+	if (!cpumask_test_and_set_cpu(cpu, &cpuidle_coupled_poked_mask))
+		__smp_call_function_single(cpu, csd, 0);
+}
+
+/**
+ * cpuidle_coupled_poke_others - wake up all other cpus that may be waiting
+ * @this_cpu: the cpu calling this function
+ * @coupled: the struct coupled that contains the current cpu
+ *
+ * Calls cpuidle_coupled_poke on all other online cpus.
+ */
+static void cpuidle_coupled_poke_others(int this_cpu,
+		struct cpuidle_coupled *coupled)
+{
+	int cpu;
+
+	for_each_cpu_mask(cpu, coupled->coupled_cpus)
+		if (cpu != this_cpu && cpu_online(cpu))
+			cpuidle_coupled_poke(cpu);
+}
+
+/**
+ * cpuidle_coupled_set_waiting - mark this cpu as in the wait loop
+ * @cpu: the current cpu
+ * @coupled: the struct coupled that contains the current cpu
+ * @next_state: the index in drv->states of the requested state for this cpu
+ *
+ * Updates the requested idle state for the specified cpuidle device,
+ * poking all coupled cpus out of idle if necessary to let them see the new
+ * state.
+ */
+static void cpuidle_coupled_set_waiting(int cpu,
+		struct cpuidle_coupled *coupled, int next_state)
+{
+	int w;
+
+	coupled->requested_state[cpu] = next_state;
+
+	/*
+	 * If this is the last cpu to enter the waiting state, poke
+	 * all the other cpus out of their waiting state so they can
+	 * enter a deeper state.  This can race with one of the cpus
+	 * exiting the waiting state due to an interrupt and
+	 * decrementing waiting_count, see comment below.
+	 *
+	 * The atomic_inc_return provides a write barrier to order the write
+	 * to requested_state with the later write that increments ready_count.
+	 */
+	w = atomic_inc_return(&coupled->ready_waiting_counts) & WAITING_MASK;
+	if (w == coupled->online_count)
+		cpuidle_coupled_poke_others(cpu, coupled);
+}
+
+/**
+ * cpuidle_coupled_set_not_waiting - mark this cpu as leaving the wait loop
+ * @cpu: the current cpu
+ * @coupled: the struct coupled that contains the current cpu
+ *
+ * Removes the requested idle state for the specified cpuidle device.
+ */
+static void cpuidle_coupled_set_not_waiting(int cpu,
+		struct cpuidle_coupled *coupled)
+{
+	/*
+	 * Decrementing waiting count can race with incrementing it in
+	 * cpuidle_coupled_set_waiting, but that's OK.  Worst case, some
+	 * cpus will increment ready_count and then spin until they
+	 * notice that this cpu has cleared its requested_state.
+	 */
+	atomic_dec(&coupled->ready_waiting_counts);
+
+	coupled->requested_state[cpu] = CPUIDLE_COUPLED_NOT_IDLE;
+}
+
+/**
+ * cpuidle_coupled_set_done - mark this cpu as leaving the ready loop
+ * @cpu: the current cpu
+ * @coupled: the struct coupled that contains the current cpu
+ *
+ * Marks this cpu as no longer in the ready and waiting loops.  Decrements
+ * the waiting count first to prevent another cpu looping back in and seeing
+ * this cpu as waiting just before it exits idle.
+ */
+static void cpuidle_coupled_set_done(int cpu, struct cpuidle_coupled *coupled)
+{
+	cpuidle_coupled_set_not_waiting(cpu, coupled);
+	atomic_sub(MAX_WAITING_CPUS, &coupled->ready_waiting_counts);
+}
+
+/**
+ * cpuidle_coupled_clear_pokes - spin until the poke interrupt is processed
+ * @cpu: this cpu
+ *
+ * Turns on interrupts and spins until any outstanding poke interrupts have
+ * been processed and the poke bit has been cleared.
+ *
+ * Other interrupts may also be processed while interrupts are enabled, so
+ * need_resched() must be tested after turning interrupts off again to make sure
+ * the interrupt didn't schedule work that should take the cpu out of idle.
+ *
+ * Returns 0 if need_resched was false, -EINTR if need_resched was true.
+ */
+static int cpuidle_coupled_clear_pokes(int cpu)
+{
+	local_irq_enable();
+	while (cpumask_test_cpu(cpu, &cpuidle_coupled_poked_mask))
+		cpu_relax();
+	local_irq_disable();
+
+	return need_resched() ? -EINTR : 0;
+}
+
+/**
+ * cpuidle_enter_state_coupled - attempt to enter a state with coupled cpus
+ * @dev: struct cpuidle_device for the current cpu
+ * @drv: struct cpuidle_driver for the platform
+ * @next_state: index of the requested state in drv->states
+ *
+ * Coordinate with coupled cpus to enter the target state.  This is a two
+ * stage process.  In the first stage, the cpus are operating independently,
+ * and may call into cpuidle_enter_state_coupled at completely different times.
+ * To save as much power as possible, the first cpus to call this function will
+ * go to an intermediate state (the cpuidle_device's safe state), and wait for
+ * all the other cpus to call this function.  Once all coupled cpus are idle,
+ * the second stage will start.  Each coupled cpu will spin until all cpus have
+ * guaranteed that they will enter the target state.
+ *
+ * This function must be called with interrupts disabled.  It may enable
+ * interrupts while preparing for idle, and it will always return with
+ * interrupts enabled.
+ */
+int cpuidle_enter_state_coupled(struct cpuidle_device *dev,
+		struct cpuidle_driver *drv, int next_state)
+{
+	int entered_state = -1;
+	struct cpuidle_coupled *coupled = dev->coupled;
+
+	if (!coupled)
+		return -EINVAL;
+
+	while (coupled->prevent) {
+		if (cpuidle_coupled_clear_pokes(dev->cpu)) {
+			local_irq_enable();
+			return entered_state;
+		}
+		entered_state = cpuidle_enter_state(dev, drv,
+			dev->safe_state_index);
+	}
+
+	/* Read barrier ensures online_count is read after prevent is cleared */
+	smp_rmb();
+
+	cpuidle_coupled_set_waiting(dev->cpu, coupled, next_state);
+
+retry:
+	/*
+	 * Wait for all coupled cpus to be idle, using the deepest state
+	 * allowed for a single cpu.
+	 */
+	while (!cpuidle_coupled_cpus_waiting(coupled)) {
+		if (cpuidle_coupled_clear_pokes(dev->cpu)) {
+			cpuidle_coupled_set_not_waiting(dev->cpu, coupled);
+			goto out;
+		}
+
+		if (coupled->prevent) {
+			cpuidle_coupled_set_not_waiting(dev->cpu, coupled);
+			goto out;
+		}
+
+		entered_state = cpuidle_enter_state(dev, drv,
+			dev->safe_state_index);
+	}
+
+	if (cpuidle_coupled_clear_pokes(dev->cpu)) {
+		cpuidle_coupled_set_not_waiting(dev->cpu, coupled);
+		goto out;
+	}
+
+	/*
+	 * All coupled cpus are probably idle.  There is a small chance that
+	 * one of the other cpus just became active.  Increment the ready count,
+	 * and spin until all coupled cpus have incremented the counter. Once a
+	 * cpu has incremented the ready counter, it cannot abort idle and must
+	 * spin until either all cpus have incremented the ready counter, or
+	 * another cpu leaves idle and decrements the waiting counter.
+	 */
+
+	cpuidle_coupled_set_ready(coupled);
+	while (!cpuidle_coupled_cpus_ready(coupled)) {
+		/* Check if any other cpus bailed out of idle. */
+		if (!cpuidle_coupled_cpus_waiting(coupled))
+			if (!cpuidle_coupled_set_not_ready(coupled))
+				goto retry;
+
+		cpu_relax();
+	}
+
+	/* all cpus have acked the coupled state */
+	next_state = cpuidle_coupled_get_state(dev, coupled);
+
+	entered_state = cpuidle_enter_state(dev, drv, next_state);
+
+	cpuidle_coupled_set_done(dev->cpu, coupled);
+
+out:
+	/*
+	 * Normal cpuidle states are expected to return with irqs enabled.
+	 * That leads to an inefficiency where a cpu receiving an interrupt
+	 * that brings it out of idle will process that interrupt before
+	 * exiting the idle enter function and decrementing ready_count.  All
+	 * other cpus will need to spin waiting for the cpu that is processing
+	 * the interrupt.  If the driver returns with interrupts disabled,
+	 * all other cpus will loop back into the safe idle state instead of
+	 * spinning, saving power.
+	 *
+	 * Calling local_irq_enable here allows coupled states to return with
+	 * interrupts disabled, but won't cause problems for drivers that
+	 * exit with interrupts enabled.
+	 */
+	local_irq_enable();
+
+	/*
+	 * Wait until all coupled cpus have exited idle.  There is no risk that
+	 * a cpu exits and re-enters the ready state because this cpu has
+	 * already decremented its waiting_count.
+	 */
+	while (!cpuidle_coupled_no_cpus_ready(coupled))
+		cpu_relax();
+
+	return entered_state;
+}
+
+static void cpuidle_coupled_update_online_cpus(struct cpuidle_coupled *coupled)
+{
+	cpumask_t cpus;
+	cpumask_and(&cpus, cpu_online_mask, &coupled->coupled_cpus);
+	coupled->online_count = cpumask_weight(&cpus);
+}
+
+/**
+ * cpuidle_coupled_register_device - register a coupled cpuidle device
+ * @dev: struct cpuidle_device for the current cpu
+ *
+ * Called from cpuidle_register_device to handle coupled idle init.  Finds the
+ * cpuidle_coupled struct for this set of coupled cpus, or creates one if none
+ * exists yet.
+ */
+int cpuidle_coupled_register_device(struct cpuidle_device *dev)
+{
+	int cpu;
+	struct cpuidle_device *other_dev;
+	struct call_single_data *csd;
+	struct cpuidle_coupled *coupled;
+
+	if (cpumask_empty(&dev->coupled_cpus))
+		return 0;
+
+	for_each_cpu_mask(cpu, dev->coupled_cpus) {
+		other_dev = per_cpu(cpuidle_devices, cpu);
+		if (other_dev && other_dev->coupled) {
+			coupled = other_dev->coupled;
+			goto have_coupled;
+		}
+	}
+
+	/* No existing coupled info found, create a new one */
+	coupled = kzalloc(sizeof(struct cpuidle_coupled), GFP_KERNEL);
+	if (!coupled)
+		return -ENOMEM;
+
+	coupled->coupled_cpus = dev->coupled_cpus;
+
+have_coupled:
+	dev->coupled = coupled;
+	if (WARN_ON(!cpumask_equal(&dev->coupled_cpus, &coupled->coupled_cpus)))
+		coupled->prevent++;
+
+	cpuidle_coupled_update_online_cpus(coupled);
+
+	coupled->refcnt++;
+
+	csd = &per_cpu(cpuidle_coupled_poke_cb, dev->cpu);
+	csd->func = cpuidle_coupled_poked;
+	csd->info = (void *)(unsigned long)dev->cpu;
+
+	return 0;
+}
+
+/**
+ * cpuidle_coupled_unregister_device - unregister a coupled cpuidle device
+ * @dev: struct cpuidle_device for the current cpu
+ *
+ * Called from cpuidle_unregister_device to tear down coupled idle.  Removes the
+ * cpu from the coupled idle set, and frees the cpuidle_coupled struct if
+ * this was the last cpu in the set.
+ */
+void cpuidle_coupled_unregister_device(struct cpuidle_device *dev)
+{
+	struct cpuidle_coupled *coupled = dev->coupled;
+
+	if (cpumask_empty(&dev->coupled_cpus))
+		return;
+
+	if (!--coupled->refcnt)
+		kfree(coupled);
+	dev->coupled = NULL;
+}
+
+/**
+ * cpuidle_coupled_prevent_idle - prevent cpus from entering a coupled state
+ * @coupled: the struct coupled that contains the cpu that is changing state
+ *
+ * Disables coupled cpuidle on a coupled set of cpus.  Used to ensure that
+ * cpu_online_mask doesn't change while cpus are coordinating coupled idle.
+ */
+static void cpuidle_coupled_prevent_idle(struct cpuidle_coupled *coupled)
+{
+	int cpu = get_cpu();
+
+	/* Force all cpus out of the waiting loop. */
+	coupled->prevent++;
+	cpuidle_coupled_poke_others(cpu, coupled);
+	put_cpu();
+	while (!cpuidle_coupled_no_cpus_waiting(coupled))
+		cpu_relax();
+}
+
+/**
+ * cpuidle_coupled_allow_idle - allows cpus to enter a coupled state
+ * @coupled: the struct coupled that contains the cpu that is changing state
+ *
+ * Enables coupled cpuidle on a coupled set of cpus.  Used to ensure that
+ * cpu_online_mask doesn't change while cpus are coordinating coupled idle.
+ */
+static void cpuidle_coupled_allow_idle(struct cpuidle_coupled *coupled)
+{
+	int cpu = get_cpu();
+
+	/*
+	 * Write barrier ensures readers see the new online_count when they
+	 * see prevent == 0.
+	 */
+	smp_wmb();
+	coupled->prevent--;
+	/* Force cpus out of the prevent loop. */
+	cpuidle_coupled_poke_others(cpu, coupled);
+	put_cpu();
+}
+
+/**
+ * cpuidle_coupled_cpu_notify - notifier called during hotplug transitions
+ * @nb: notifier block
+ * @action: hotplug transition
+ * @hcpu: target cpu number
+ *
+ * Called when a cpu is brought online or taken offline using hotplug.
+ * Updates the coupled cpu set appropriately.
+ */
+static int cpuidle_coupled_cpu_notify(struct notifier_block *nb,
+		unsigned long action, void *hcpu)
+{
+	int cpu = (unsigned long)hcpu;
+	struct cpuidle_device *dev;
+
+	switch (action & ~CPU_TASKS_FROZEN) {
+	case CPU_UP_PREPARE:
+	case CPU_DOWN_PREPARE:
+	case CPU_ONLINE:
+	case CPU_DEAD:
+	case CPU_UP_CANCELED:
+	case CPU_DOWN_FAILED:
+		break;
+	default:
+		return NOTIFY_OK;
+	}
+
+	mutex_lock(&cpuidle_lock);
+
+	dev = per_cpu(cpuidle_devices, cpu);
+	if (!dev->coupled)
+		goto out;
+
+	switch (action & ~CPU_TASKS_FROZEN) {
+	case CPU_UP_PREPARE:
+	case CPU_DOWN_PREPARE:
+		cpuidle_coupled_prevent_idle(dev->coupled);
+		break;
+	case CPU_ONLINE:
+	case CPU_DEAD:
+		cpuidle_coupled_update_online_cpus(dev->coupled);
+		/* Fall through */
+	case CPU_UP_CANCELED:
+	case CPU_DOWN_FAILED:
+		cpuidle_coupled_allow_idle(dev->coupled);
+		break;
+	}
+
+out:
+	mutex_unlock(&cpuidle_lock);
+	return NOTIFY_OK;
+}
+
+static struct notifier_block cpuidle_coupled_cpu_notifier = {
+	.notifier_call = cpuidle_coupled_cpu_notify,
+};
+
+static int __init cpuidle_coupled_init(void)
+{
+	return register_cpu_notifier(&cpuidle_coupled_cpu_notifier);
+}
+core_initcall(cpuidle_coupled_init);
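Not part of this patch, but for orientation: a minimal sketch of how a
platform cpuidle driver is expected to opt in to coupled idle.  The per-cpu
device fields (coupled_cpus, safe_state_index) are the ones consumed by the
code above; the device and function names, the cpumask, and the assumption
that state 0 is the per-cpu safe state while the deep state carries the
coupled flag (CPUIDLE_FLAG_COUPLED upstream) are illustrative assumptions,
not something this series adds.

	#include <linux/cpuidle.h>
	#include <linux/cpumask.h>
	#include <linux/init.h>
	#include <linux/percpu.h>

	static DEFINE_PER_CPU(struct cpuidle_device, plat_idle_dev);

	static int __init plat_coupled_idle_init(void)
	{
		int cpu, ret;

		for_each_possible_cpu(cpu) {
			struct cpuidle_device *dev = &per_cpu(plat_idle_dev, cpu);

			dev->cpu = cpu;
			/* all of these cpus must reach the deep state together */
			cpumask_copy(&dev->coupled_cpus, cpu_possible_mask);
			/* state 0 (e.g. WFI) is used while waiting for the others */
			dev->safe_state_index = 0;
			ret = cpuidle_register_device(dev);
			if (ret)
				return ret;
		}
		return 0;
	}
	device_initcall(plat_coupled_idle_init);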
diff --git a/drivers/cpuidle/cpuidle.c b/drivers/cpuidle/cpuidle.c
index 2f0083a..e81cfda 100644
--- a/drivers/cpuidle/cpuidle.c
+++ b/drivers/cpuidle/cpuidle.c
@@ -103,6 +103,34 @@
 }
 
 /**
+ * cpuidle_enter_state - enter the state and update stats
+ * @dev: cpuidle device for this cpu
+ * @drv: cpuidle driver for this cpu
+ * @next_state: index into drv->states of the state to enter
+ */
+int cpuidle_enter_state(struct cpuidle_device *dev, struct cpuidle_driver *drv,
+		int next_state)
+{
+	int entered_state;
+
+	entered_state = cpuidle_enter_ops(dev, drv, next_state);
+
+	if (entered_state >= 0) {
+		/* Update cpuidle counters */
+		/* This can be moved to within driver enter routine
+		 * but that results in multiple copies of same code.
+		 */
+		dev->states_usage[entered_state].time +=
+				(unsigned long long)dev->last_residency;
+		dev->states_usage[entered_state].usage++;
+	} else {
+		dev->last_residency = 0;
+	}
+
+	return entered_state;
+}
+
+/**
  * cpuidle_idle_call - the main idle loop
  *
  * NOTE: no locks or semaphores should be used here
@@ -143,23 +171,15 @@
 	trace_power_start_rcuidle(POWER_CSTATE, next_state, dev->cpu);
 	trace_cpu_idle_rcuidle(next_state, dev->cpu);
 
-	entered_state = cpuidle_enter_ops(dev, drv, next_state);
+	if (cpuidle_state_is_coupled(dev, drv, next_state))
+		entered_state = cpuidle_enter_state_coupled(dev, drv,
+							    next_state);
+	else
+		entered_state = cpuidle_enter_state(dev, drv, next_state);
 
 	trace_power_end_rcuidle(dev->cpu);
 	trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, dev->cpu);
 
-	if (entered_state >= 0) {
-		/* Update cpuidle counters */
-		/* This can be moved to within driver enter routine
-		 * but that results in multiple copies of same code.
-		 */
-		dev->states_usage[entered_state].time +=
-				(unsigned long long)dev->last_residency;
-		dev->states_usage[entered_state].usage++;
-	} else {
-		dev->last_residency = 0;
-	}
-
 	/* give the governor an opportunity to reflect on the outcome */
 	if (cpuidle_curr_governor->reflect)
 		cpuidle_curr_governor->reflect(dev, entered_state);
@@ -387,13 +407,25 @@
 
 	per_cpu(cpuidle_devices, dev->cpu) = dev;
 	list_add(&dev->device_list, &cpuidle_detected_devices);
-	if ((ret = cpuidle_add_sysfs(cpu_dev))) {
-		module_put(cpuidle_driver->owner);
-		return ret;
-	}
+	ret = cpuidle_add_sysfs(cpu_dev);
+	if (ret)
+		goto err_sysfs;
+
+	ret = cpuidle_coupled_register_device(dev);
+	if (ret)
+		goto err_coupled;
 
 	dev->registered = 1;
 	return 0;
+
+err_coupled:
+	cpuidle_remove_sysfs(cpu_dev);
+	wait_for_completion(&dev->kobj_unregister);
+err_sysfs:
+	list_del(&dev->device_list);
+	per_cpu(cpuidle_devices, dev->cpu) = NULL;
+	module_put(cpuidle_driver->owner);
+	return ret;
 }
 
 /**
@@ -443,6 +475,8 @@
 	wait_for_completion(&dev->kobj_unregister);
 	per_cpu(cpuidle_devices, dev->cpu) = NULL;
 
+	cpuidle_coupled_unregister_device(dev);
+
 	cpuidle_resume_and_unlock();
 
 	module_put(cpuidle_driver->owner);
diff --git a/drivers/cpuidle/cpuidle.h b/drivers/cpuidle/cpuidle.h
index 7db1866..76e7f69 100644
--- a/drivers/cpuidle/cpuidle.h
+++ b/drivers/cpuidle/cpuidle.h
@@ -14,6 +14,8 @@
 extern struct mutex cpuidle_lock;
 extern spinlock_t cpuidle_driver_lock;
 extern int cpuidle_disabled(void);
+extern int cpuidle_enter_state(struct cpuidle_device *dev,
+		struct cpuidle_driver *drv, int next_state);
 
 /* idle loop */
 extern void cpuidle_install_idle_handler(void);
@@ -30,4 +32,34 @@
 extern int cpuidle_add_sysfs(struct device *dev);
 extern void cpuidle_remove_sysfs(struct device *dev);
 
+#ifdef CONFIG_ARCH_NEEDS_CPU_IDLE_COUPLED
+bool cpuidle_state_is_coupled(struct cpuidle_device *dev,
+		struct cpuidle_driver *drv, int state);
+int cpuidle_enter_state_coupled(struct cpuidle_device *dev,
+		struct cpuidle_driver *drv, int next_state);
+int cpuidle_coupled_register_device(struct cpuidle_device *dev);
+void cpuidle_coupled_unregister_device(struct cpuidle_device *dev);
+#else
+static inline bool cpuidle_state_is_coupled(struct cpuidle_device *dev,
+		struct cpuidle_driver *drv, int state)
+{
+	return false;
+}
+
+static inline int cpuidle_enter_state_coupled(struct cpuidle_device *dev,
+		struct cpuidle_driver *drv, int next_state)
+{
+	return -1;
+}
+
+static inline int cpuidle_coupled_register_device(struct cpuidle_device *dev)
+{
+	return 0;
+}
+
+static inline void cpuidle_coupled_unregister_device(struct cpuidle_device *dev)
+{
+}
+#endif
+
 #endif /* __DRIVER_CPUIDLE_H */
diff --git a/drivers/gpu/ion/Kconfig b/drivers/gpu/ion/Kconfig
index 5bb254b..39133b5 100644
--- a/drivers/gpu/ion/Kconfig
+++ b/drivers/gpu/ion/Kconfig
@@ -16,3 +16,12 @@
 	depends on ARCH_MSM && ION
 	help
 	  Choose this option if you wish to use ion on an MSM target.
+
+config ION_LEAK_CHECK
+	bool "Check for leaked Ion buffers (debugging)"
+	depends on ION
+	help
+	  Choose this option if you wish to enable checking for leaked
+	  ion buffers at runtime. Choosing this option will also add a
+	  debugfs node under the ion directory that can be used to
+	  enable/disable the leak checking.
diff --git a/drivers/gpu/ion/ion.c b/drivers/gpu/ion/ion.c
index 82403d2..0904f9fe 100644
--- a/drivers/gpu/ion/ion.c
+++ b/drivers/gpu/ion/ion.c
@@ -1024,10 +1024,91 @@
 	return client;
 }
 
+/**
+ * ion_mark_dangling_buffers_locked() - Mark dangling buffers
+ * @dev:	the ion device whose buffers will be searched
+ *
+ * Sets marked=1 for all known buffers associated with `dev' that no
+ * longer have a handle pointing to them. dev->lock should be held
+ * across a call to this function (and should only be unlocked after
+ * checking for marked buffers).
+ */
+static void ion_mark_dangling_buffers_locked(struct ion_device *dev)
+{
+	struct rb_node *n, *n2;
+	/* mark all buffers as 1 */
+	for (n = rb_first(&dev->buffers); n; n = rb_next(n)) {
+		struct ion_buffer *buf = rb_entry(n, struct ion_buffer,
+						node);
+
+		buf->marked = 1;
+	}
+
+	/* now see which buffers we can access */
+	for (n = rb_first(&dev->clients); n; n = rb_next(n)) {
+		struct ion_client *client = rb_entry(n, struct ion_client,
+						node);
+
+		mutex_lock(&client->lock);
+		for (n2 = rb_first(&client->handles); n2; n2 = rb_next(n2)) {
+			struct ion_handle *handle
+				= rb_entry(n2, struct ion_handle, node);
+
+			handle->buffer->marked = 0;
+
+		}
+		mutex_unlock(&client->lock);
+
+	}
+}
+
+#ifdef CONFIG_ION_LEAK_CHECK
+static u32 ion_debug_check_leaks_on_destroy;
+
+static int ion_check_for_and_print_leaks(struct ion_device *dev)
+{
+	struct rb_node *n;
+	int num_leaks = 0;
+
+	if (!ion_debug_check_leaks_on_destroy)
+		return 0;
+
+	/* check for leaked buffers (those that no longer have a
+	 * handle pointing to them) */
+	ion_mark_dangling_buffers_locked(dev);
+
+	/* Anyone still marked as a 1 means a leaked handle somewhere */
+	for (n = rb_first(&dev->buffers); n; n = rb_next(n)) {
+		struct ion_buffer *buf = rb_entry(n, struct ion_buffer,
+						node);
+
+		if (buf->marked == 1) {
+			pr_info("Leaked ion buffer at %p\n", buf);
+			num_leaks++;
+		}
+	}
+	return num_leaks;
+}
+static void setup_ion_leak_check(struct dentry *debug_root)
+{
+	debugfs_create_bool("check_leaks_on_destroy", 0664, debug_root,
+			&ion_debug_check_leaks_on_destroy);
+}
+#else
+static int ion_check_for_and_print_leaks(struct ion_device *dev)
+{
+	return 0;
+}
+static void setup_ion_leak_check(struct dentry *debug_root)
+{
+}
+#endif
+
 void ion_client_destroy(struct ion_client *client)
 {
 	struct ion_device *dev = client->dev;
 	struct rb_node *n;
+	int num_leaks;
 
 	pr_debug("%s: %d\n", __func__, __LINE__);
 	while ((n = rb_first(&client->handles))) {
@@ -1040,8 +1121,21 @@
 		put_task_struct(client->task);
 	rb_erase(&client->node, &dev->clients);
 	debugfs_remove_recursive(client->debug_root);
+
+	num_leaks = ion_check_for_and_print_leaks(dev);
+
 	mutex_unlock(&dev->lock);
 
+	if (num_leaks) {
+		struct task_struct *current_task = current;
+		char current_task_name[TASK_COMM_LEN];
+		get_task_comm(current_task_name, current_task);
+		WARN(1, "%s: Detected %d leaked ion buffer%s.\n",
+			__func__, num_leaks, num_leaks == 1 ? "" : "s");
+		pr_info("task name at time of leak: %s, pid: %d\n",
+			current_task_name, current_task->pid);
+	}
+
 	kfree(client->name);
 	kfree(client);
 }
@@ -1126,33 +1220,17 @@
 {
 }
 
-static void ion_vma_open(struct vm_area_struct *vma)
-{
-	struct ion_buffer *buffer = vma->vm_private_data;
-
-	pr_debug("%s: %d\n", __func__, __LINE__);
-
-	mutex_lock(&buffer->lock);
-	buffer->umap_cnt++;
-	mutex_unlock(&buffer->lock);
-}
-
 static void ion_vma_close(struct vm_area_struct *vma)
 {
 	struct ion_buffer *buffer = vma->vm_private_data;
 
 	pr_debug("%s: %d\n", __func__, __LINE__);
 
-	mutex_lock(&buffer->lock);
-	buffer->umap_cnt--;
-	mutex_unlock(&buffer->lock);
-
 	if (buffer->heap->ops->unmap_user)
 		buffer->heap->ops->unmap_user(buffer->heap, buffer);
 }
 
 static struct vm_operations_struct ion_vm_ops = {
-	.open = ion_vma_open,
 	.close = ion_vma_close,
 };
 
@@ -1176,7 +1254,6 @@
 		pr_err("%s: failure mapping buffer to userspace\n",
 		       __func__);
 	} else {
-		buffer->umap_cnt++;
 		mutex_unlock(&buffer->lock);
 
 		vma->vm_ops = &ion_vm_ops;
@@ -1414,9 +1491,6 @@
 	case ION_IOC_CLEAN_INV_CACHES:
 		return client->dev->custom_ioctl(client,
 						ION_IOC_CLEAN_INV_CACHES, arg);
-	case ION_IOC_GET_FLAGS:
-		return client->dev->custom_ioctl(client,
-						ION_IOC_GET_FLAGS, arg);
 	default:
 		return -ENOTTY;
 	}
@@ -1824,37 +1898,14 @@
 {
 	struct ion_device *dev = s->private;
 	struct rb_node *n;
-	struct rb_node *n2;
 
-	/* mark all buffers as 1 */
 	seq_printf(s, "%16.s %16.s %16.s %16.s\n", "buffer", "heap", "size",
 		"ref cnt");
+
 	mutex_lock(&dev->lock);
-	for (n = rb_first(&dev->buffers); n; n = rb_next(n)) {
-		struct ion_buffer *buf = rb_entry(n, struct ion_buffer,
-						     node);
+	ion_mark_dangling_buffers_locked(dev);
 
-		buf->marked = 1;
-	}
-
-	/* now see which buffers we can access */
-	for (n = rb_first(&dev->clients); n; n = rb_next(n)) {
-		struct ion_client *client = rb_entry(n, struct ion_client,
-						     node);
-
-		mutex_lock(&client->lock);
-		for (n2 = rb_first(&client->handles); n2; n2 = rb_next(n2)) {
-			struct ion_handle *handle = rb_entry(n2,
-						struct ion_handle, node);
-
-			handle->buffer->marked = 0;
-
-		}
-		mutex_unlock(&client->lock);
-
-	}
-
-	/* And anyone still marked as a 1 means a leaked handle somewhere */
+	/* Anyone still marked as a 1 means a leaked handle somewhere */
 	for (n = rb_first(&dev->buffers); n; n = rb_next(n)) {
 		struct ion_buffer *buf = rb_entry(n, struct ion_buffer,
 						     node);
@@ -1915,6 +1966,8 @@
 	idev->clients = RB_ROOT;
 	debugfs_create_file("check_leaked_fds", 0664, idev->debug_root, idev,
 			    &debug_leak_fops);
+
+	setup_ion_leak_check(idev->debug_root);
 	return idev;
 }
 
diff --git a/drivers/gpu/ion/ion_cp_heap.c b/drivers/gpu/ion/ion_cp_heap.c
index 83463ac..56ccc8f 100644
--- a/drivers/gpu/ion/ion_cp_heap.c
+++ b/drivers/gpu/ion/ion_cp_heap.c
@@ -490,39 +490,31 @@
 
 struct sg_table *ion_cp_heap_create_sg_table(struct ion_buffer *buffer)
 {
+	size_t chunk_size = buffer->size;
 	struct sg_table *table;
-	int ret;
+	int ret, i, n_chunks;
+	struct scatterlist *sg;
 	struct ion_cp_buffer *buf = buffer->priv_virt;
 
+	if (ION_IS_CACHED(buffer->flags))
+		chunk_size = PAGE_SIZE;
+	else if (buf->is_secure && IS_ALIGNED(buffer->size, SZ_1M))
+		chunk_size = SZ_1M;
+
 	table = kzalloc(sizeof(struct sg_table), GFP_KERNEL);
 	if (!table)
 		return ERR_PTR(-ENOMEM);
 
-	if (buf->is_secure && IS_ALIGNED(buffer->size, SZ_1M)) {
-		int n_chunks;
-		int i;
-		struct scatterlist *sg;
+	n_chunks = DIV_ROUND_UP(buffer->size, chunk_size);
 
-		/* Count number of 1MB chunks. Alignment is already checked. */
-		n_chunks = buffer->size >> 20;
+	ret = sg_alloc_table(table, n_chunks, GFP_KERNEL);
+	if (ret)
+		goto err0;
 
-		ret = sg_alloc_table(table, n_chunks, GFP_KERNEL);
-		if (ret)
-			goto err0;
-
-		for_each_sg(table->sgl, sg, table->nents, i) {
-			sg_dma_address(sg) = buf->buffer + i * SZ_1M;
-			sg->length = SZ_1M;
-			sg->offset = 0;
-		}
-	} else {
-		ret = sg_alloc_table(table, 1, GFP_KERNEL);
-		if (ret)
-			goto err0;
-
-		table->sgl->length = buffer->size;
-		table->sgl->offset = 0;
-		table->sgl->dma_address = buf->buffer;
+	for_each_sg(table->sgl, sg, table->nents, i) {
+		sg_dma_address(sg) = buf->buffer + i * chunk_size;
+		sg->length = chunk_size;
+		sg->offset = 0;
 	}
 
 	return table;
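To make the unified chunking concrete (buffer sizes here are illustrative):
a cached 20 KB buffer gets chunk_size = PAGE_SIZE, i.e. DIV_ROUND_UP(20480,
4096) = 5 scatterlist entries of 4 KB each, while an uncached secure 3 MB
buffer gets chunk_size = SZ_1M, i.e. 3 entries of 1 MB, exactly what the
removed secure special case produced.  A stand-alone sketch of the same
arithmetic:

	#include <stdio.h>

	/* mirrors the kernel's DIV_ROUND_UP() */
	#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

	int main(void)
	{
		printf("cached: %lu chunks\n", DIV_ROUND_UP(20480UL, 4096UL));		/* 5 */
		printf("secure: %lu chunks\n", DIV_ROUND_UP(3UL << 20, 1UL << 20));	/* 3 */
		return 0;
	}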
diff --git a/drivers/gpu/ion/ion_iommu_heap.c b/drivers/gpu/ion/ion_iommu_heap.c
index c5fef5b..761fdde 100644
--- a/drivers/gpu/ion/ion_iommu_heap.c
+++ b/drivers/gpu/ion/ion_iommu_heap.c
@@ -109,7 +109,7 @@
 		void *ptr = NULL;
 		unsigned int npages_to_vmap, total_pages, num_large_pages = 0;
 		long size_remaining = PAGE_ALIGN(size);
-		unsigned int max_order = orders[0];
+		unsigned int max_order = ION_IS_CACHED(flags) ? 0 : orders[0];
 
 		data = kmalloc(sizeof(*data), GFP_KERNEL);
 		if (!data)
diff --git a/drivers/gpu/ion/ion_priv.h b/drivers/gpu/ion/ion_priv.h
index 77ecfa5..9d1e90e 100644
--- a/drivers/gpu/ion/ion_priv.h
+++ b/drivers/gpu/ion/ion_priv.h
@@ -98,7 +98,6 @@
 	void *vaddr;
 	int dmap_cnt;
 	struct sg_table *sg_table;
-	int umap_cnt;
 	unsigned int iommu_map_cnt;
 	struct rb_root iommu_maps;
 	int marked;
diff --git a/drivers/gpu/ion/msm/ion_cp_common.c b/drivers/gpu/ion/msm/ion_cp_common.c
index fa4bad5..8c9b95d 100644
--- a/drivers/gpu/ion/msm/ion_cp_common.c
+++ b/drivers/gpu/ion/msm/ion_cp_common.c
@@ -176,9 +176,9 @@
 				buffer, ret_value);
 			atomic_dec(&buf->secure_cnt);
 		} else {
-			pr_debug("Protected buffer %p from %x-%x\n",
-				buffer, buf->buffer,
-				buf->buffer + buffer->size);
+			pr_debug("Protected buffer %p from %pa (size %x)\n",
+				buffer, &buf->buffer,
+				buffer->size);
 			buf->want_delayed_unsecure |=
 				flags & ION_UNSECURE_DELAYED ? 1 : 0;
 			buf->data = data;
diff --git a/drivers/gpu/ion/msm/msm_ion.c b/drivers/gpu/ion/msm/msm_ion.c
index b660968..832a9a1 100644
--- a/drivers/gpu/ion/msm/msm_ion.c
+++ b/drivers/gpu/ion/msm/msm_ion.c
@@ -55,7 +55,7 @@
 	},
 	{
 		.id	= ION_CP_MM_HEAP_ID,
-		.type	= ION_HEAP_TYPE_CP,
+		.type	= ION_HEAP_TYPE_SECURE_DMA,
 		.name	= ION_MM_HEAP_NAME,
 		.permission_type = IPT_TYPE_MM_CARVEOUT,
 	},
@@ -734,22 +734,6 @@
 		break;
 
 	}
-	case ION_IOC_GET_FLAGS:
-	{
-		struct ion_flag_data data;
-		int ret;
-		if (copy_from_user(&data, (void __user *)arg,
-					sizeof(struct ion_flag_data)))
-			return -EFAULT;
-
-		ret = ion_handle_get_flags(client, data.handle, &data.flags);
-		if (ret < 0)
-			return ret;
-		if (copy_to_user((void __user *)arg, &data,
-					sizeof(struct ion_flag_data)))
-			return -EFAULT;
-		break;
-	}
 	default:
 		return -ENOTTY;
 	}
diff --git a/drivers/gpu/msm/adreno.c b/drivers/gpu/msm/adreno.c
index b7d813c..b1a45bf 100644
--- a/drivers/gpu/msm/adreno.c
+++ b/drivers/gpu/msm/adreno.c
@@ -2000,8 +2000,17 @@
 		/* Is the ring buffer is empty? */
 		GSL_RB_GET_READPTR(rb, &rb->rptr);
 		if (!device->active_cnt && (rb->rptr == rb->wptr)) {
-			/* Is the core idle? */
-			status = is_adreno_rbbm_status_idle(device);
+			/*
+			 * Are there interrupts pending? If so then pretend we
+			 * are not idle - this avoids the possibility that we go
+			 * to a lower power state without handling interrupts
+			 * first.
+			 */
+
+			if (!adreno_dev->gpudev->irq_pending(adreno_dev)) {
+				/* Is the core idle? */
+				status = is_adreno_rbbm_status_idle(device);
+			}
 		}
 	} else {
 		status = true;
diff --git a/drivers/gpu/msm/adreno.h b/drivers/gpu/msm/adreno.h
index cc6eb16..b1cab9b 100644
--- a/drivers/gpu/msm/adreno.h
+++ b/drivers/gpu/msm/adreno.h
@@ -125,6 +125,7 @@
 					struct adreno_context *);
 	irqreturn_t (*irq_handler)(struct adreno_device *);
 	void (*irq_control)(struct adreno_device *, int);
+	unsigned int (*irq_pending)(struct adreno_device *);
 	void * (*snapshot)(struct adreno_device *, void *, int *, int);
 	void (*rb_init)(struct adreno_device *, struct adreno_ringbuffer *);
 	void (*start)(struct adreno_device *);
diff --git a/drivers/gpu/msm/adreno_a2xx.c b/drivers/gpu/msm/adreno_a2xx.c
index 952d1f8..6db6e7b 100644
--- a/drivers/gpu/msm/adreno_a2xx.c
+++ b/drivers/gpu/msm/adreno_a2xx.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2002,2007-2012, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2002,2007-2013, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -1706,34 +1706,6 @@
 		return;
 	}
 
-	if (status & CP_INT_CNTL__RB_INT_MASK) {
-		/* signal intr completion event */
-		unsigned int context_id, timestamp;
-		kgsl_sharedmem_readl(&device->memstore, &context_id,
-				KGSL_MEMSTORE_OFFSET(KGSL_MEMSTORE_GLOBAL,
-					current_context));
-
-		kgsl_sharedmem_readl(&device->memstore, &timestamp,
-				KGSL_MEMSTORE_OFFSET(context_id,
-					eoptimestamp));
-
-		if (context_id < KGSL_MEMSTORE_MAX) {
-			/* reset per context ts_cmp_enable */
-			kgsl_sharedmem_writel(&device->memstore,
-					KGSL_MEMSTORE_OFFSET(context_id,
-						ts_cmp_enable), 0);
-			/* Always reset global timestamp ts_cmp_enable */
-			kgsl_sharedmem_writel(&device->memstore,
-					KGSL_MEMSTORE_OFFSET(
-						KGSL_MEMSTORE_GLOBAL,
-						ts_cmp_enable), 0);
-			wmb();
-		}
-
-		KGSL_CMD_WARN(device, "<%d:0x%x> ringbuffer interrupt\n",
-				context_id, timestamp);
-	}
-
 	for (i = 0; i < ARRAY_SIZE(kgsl_cp_error_irqs); i++) {
 		if (status & kgsl_cp_error_irqs[i].mask) {
 			KGSL_CMD_CRIT(rb->device, "%s\n",
@@ -1840,6 +1812,19 @@
 	wmb();
 }
 
+static unsigned int a2xx_irq_pending(struct adreno_device *adreno_dev)
+{
+	struct kgsl_device *device = &adreno_dev->dev;
+	unsigned int rbbm, cp, mh;
+
+	adreno_regread(device, REG_RBBM_INT_CNTL, &rbbm);
+	adreno_regread(device, REG_CP_INT_CNTL, &cp);
+	adreno_regread(device, MH_INTERRUPT_MASK, &mh);
+
+	return ((rbbm & RBBM_INT_MASK) || (cp & CP_INT_MASK) ||
+		(mh & kgsl_mmu_get_int_mask())) ? 1 : 0;
+}
+
 static void a2xx_rb_init(struct adreno_device *adreno_dev,
 			struct adreno_ringbuffer *rb)
 {
@@ -2035,6 +2020,7 @@
 	.ctxt_draw_workaround = a2xx_drawctxt_draw_workaround,
 	.irq_handler = a2xx_irq_handler,
 	.irq_control = a2xx_irq_control,
+	.irq_pending = a2xx_irq_pending,
 	.snapshot = a2xx_snapshot,
 	.rb_init = a2xx_rb_init,
 	.busy_cycles = a2xx_busy_cycles,
diff --git a/drivers/gpu/msm/adreno_a3xx.c b/drivers/gpu/msm/adreno_a3xx.c
index bbe97de..73a7f52 100644
--- a/drivers/gpu/msm/adreno_a3xx.c
+++ b/drivers/gpu/msm/adreno_a3xx.c
@@ -2591,33 +2591,7 @@
 {
 	struct kgsl_device *device = &adreno_dev->dev;
 
-	if (irq == A3XX_INT_CP_RB_INT) {
-		unsigned int context_id, timestamp;
-		kgsl_sharedmem_readl(&device->memstore, &context_id,
-				KGSL_MEMSTORE_OFFSET(KGSL_MEMSTORE_GLOBAL,
-					current_context));
-
-		kgsl_sharedmem_readl(&device->memstore, &timestamp,
-				KGSL_MEMSTORE_OFFSET(context_id,
-					eoptimestamp));
-
-		if (context_id < KGSL_MEMSTORE_MAX) {
-			/* reset per context ts_cmp_enable */
-			kgsl_sharedmem_writel(&device->memstore,
-					KGSL_MEMSTORE_OFFSET(context_id,
-						ts_cmp_enable), 0);
-			/* Always reset global timestamp ts_cmp_enable */
-			kgsl_sharedmem_writel(&device->memstore,
-					KGSL_MEMSTORE_OFFSET(
-						KGSL_MEMSTORE_GLOBAL,
-						ts_cmp_enable), 0);
-			wmb();
-		}
-
-		KGSL_CMD_WARN(device, "<%d:0x%x> ringbuffer interrupt\n",
-				context_id, timestamp);
-	}
-
+	/* Wake up everybody waiting for the interrupt */
 	wake_up_interruptible_all(&device->wait_queue);
 
 	/* Schedule work to free mem and issue ibs */
@@ -2713,6 +2687,15 @@
 		adreno_regwrite(device, A3XX_RBBM_INT_0_MASK, 0);
 }
 
+static unsigned int a3xx_irq_pending(struct adreno_device *adreno_dev)
+{
+	unsigned int status;
+
+	adreno_regread(&adreno_dev->dev, A3XX_RBBM_INT_0_STATUS, &status);
+
+	return (status & A3XX_INT_MASK) ? 1 : 0;
+}
+
 static unsigned int a3xx_busy_cycles(struct adreno_device *adreno_dev)
 {
 	struct kgsl_device *device = &adreno_dev->dev;
@@ -2764,6 +2747,11 @@
 };
 
 static struct a3xx_vbif_data a305b_vbif[] = {
+	{ A3XX_VBIF_IN_RD_LIM_CONF0, 0x00181818 },
+	{ A3XX_VBIF_IN_WR_LIM_CONF0, 0x00181818 },
+	{ A3XX_VBIF_OUT_RD_LIM_CONF0, 0x00000018 },
+	{ A3XX_VBIF_OUT_WR_LIM_CONF0, 0x00000018 },
+	{ A3XX_VBIF_DDR_OUT_MAX_BURST, 0x00000303 },
 	{ A3XX_VBIF_ROUND_ROBIN_QOS_ARB, 0x0003 },
 	{0, 0},
 };
@@ -2953,6 +2941,7 @@
 	.rb_init = a3xx_rb_init,
 	.irq_control = a3xx_irq_control,
 	.irq_handler = a3xx_irq_handler,
+	.irq_pending = a3xx_irq_pending,
 	.busy_cycles = a3xx_busy_cycles,
 	.start = a3xx_start,
 	.snapshot = a3xx_snapshot,
diff --git a/drivers/gpu/msm/adreno_ringbuffer.c b/drivers/gpu/msm/adreno_ringbuffer.c
index c43ac51..1d25646 100644
--- a/drivers/gpu/msm/adreno_ringbuffer.c
+++ b/drivers/gpu/msm/adreno_ringbuffer.c
@@ -570,7 +570,7 @@
 	total_sizedwords += (flags & KGSL_CMD_FLAGS_INTERNAL_ISSUE) ? 2 : 0;
 
 	/* Add CP_COND_EXEC commands to generate CP_INTERRUPT */
-	total_sizedwords += context ? 7 : 0;
+	total_sizedwords += context ? 13 : 0;
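+	/*
+	 * 13 vs. the previous 7: the two three-dword CP_MEM_WRITE packets
+	 * added below (to clear ts_cmp_enable) account for the extra six
+	 * dwords inside the conditional block.
+	 */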
 
 	if (adreno_is_a3xx(adreno_dev))
 		total_sizedwords += 7;
@@ -720,7 +720,25 @@
 				context_id, ref_wait_ts)) >> 2);
 		GSL_RB_WRITE(ringcmds, rcmd_gpu, timestamp);
 		/* # of conditional command DWORDs */
-		GSL_RB_WRITE(ringcmds, rcmd_gpu, 2);
+		GSL_RB_WRITE(ringcmds, rcmd_gpu, 8);
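+		/* 8 = two 3-dword CP_MEM_WRITE packets + the 2-dword CP_INTERRUPT */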
+
+		/* Clear the ts_cmp_enable for the context */
+		GSL_RB_WRITE(ringcmds, rcmd_gpu,
+			cp_type3_packet(CP_MEM_WRITE, 2));
+		GSL_RB_WRITE(ringcmds, rcmd_gpu, gpuaddr +
+			KGSL_MEMSTORE_OFFSET(
+				context_id, ts_cmp_enable));
+		GSL_RB_WRITE(ringcmds, rcmd_gpu, 0x0);
+
+		/* Clear the ts_cmp_enable for the global timestamp */
+		GSL_RB_WRITE(ringcmds, rcmd_gpu,
+			cp_type3_packet(CP_MEM_WRITE, 2));
+		GSL_RB_WRITE(ringcmds, rcmd_gpu, gpuaddr +
+			KGSL_MEMSTORE_OFFSET(
+				KGSL_MEMSTORE_GLOBAL, ts_cmp_enable));
+		GSL_RB_WRITE(ringcmds, rcmd_gpu, 0x0);
+
+		/* Trigger the interrupt */
 		GSL_RB_WRITE(ringcmds, rcmd_gpu,
 			cp_type3_packet(CP_INTERRUPT, 1));
 		GSL_RB_WRITE(ringcmds, rcmd_gpu, CP_INT_CNTL__RB_INT_MASK);
diff --git a/drivers/gpu/msm/kgsl.c b/drivers/gpu/msm/kgsl.c
index 115fcb7..7ed0b10 100644
--- a/drivers/gpu/msm/kgsl.c
+++ b/drivers/gpu/msm/kgsl.c
@@ -1088,10 +1088,7 @@
 				      result);
 
 	/* Fire off any pending suspend operations that are in flight */
-
-	INIT_COMPLETION(dev_priv->device->suspend_gate);
-	dev_priv->device->active_cnt--;
-	complete(&dev_priv->device->suspend_gate);
+	kgsl_active_count_put(dev_priv->device);
 
 	return result;
 }
diff --git a/drivers/gpu/msm/kgsl_device.h b/drivers/gpu/msm/kgsl_device.h
index 62316f3..66390fc 100644
--- a/drivers/gpu/msm/kgsl_device.h
+++ b/drivers/gpu/msm/kgsl_device.h
@@ -133,6 +133,7 @@
 	void *priv;
 	struct list_head list;
 	void *owner;
+	unsigned int created;
 };
 
 
@@ -449,4 +450,23 @@
 	kref_put(&context->refcount, kgsl_context_destroy);
 }
 
+/**
+ * kgsl_active_count_put - Decrease the device active count
+ * @device: Pointer to a KGSL device
+ *
+ * Decrease the active count for the KGSL device and trigger the suspend_gate
+ * completion if it hits zero
+ */
+static inline void
+kgsl_active_count_put(struct kgsl_device *device)
+{
+	if (device->active_cnt == 1)
+		INIT_COMPLETION(device->suspend_gate);
+
+	device->active_cnt--;
+
+	if (device->active_cnt == 0)
+		complete(&device->suspend_gate);
+}
+
 #endif  /* __KGSL_DEVICE_H */
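kgsl_active_count_put() is the release half of a reference count that this
series otherwise open-codes: kgsl_events.c below bumps device->active_cnt
directly when an event is registered and drops the reference through this
helper once the event fires or is cancelled.  Purely for illustration (the
wrapper below is hypothetical and not added by this patch), the acquire side
corresponds to:

	/* Hypothetical acquire wrapper (the patch open-codes the increment). */
	static inline void kgsl_active_count_get(struct kgsl_device *device)
	{
		device->active_cnt++;
	}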
diff --git a/drivers/gpu/msm/kgsl_events.c b/drivers/gpu/msm/kgsl_events.c
index be9b5eb..6798eed 100644
--- a/drivers/gpu/msm/kgsl_events.c
+++ b/drivers/gpu/msm/kgsl_events.c
@@ -16,6 +16,8 @@
 #include <linux/module.h>
 #include <kgsl_device.h>
 
+#include "kgsl_trace.h"
+
 static void _add_event_to_list(struct list_head *head, struct kgsl_event *event)
 {
 	struct list_head *n;
@@ -71,6 +73,7 @@
 	 */
 
 	if (timestamp_cmp(cur_ts, ts) >= 0) {
+		trace_kgsl_fire_event(id, ts, 0);
 		cb(device, priv, id, ts);
 		return 0;
 	}
@@ -84,6 +87,9 @@
 	event->priv = priv;
 	event->func = cb;
 	event->owner = owner;
+	event->created = jiffies;
+
+	trace_kgsl_register_event(id, ts);
 
 	/* inc refcount to avoid race conditions in cleanup */
 	if (context)
@@ -106,6 +112,13 @@
 	} else
 		_add_event_to_list(&device->events, event);
 
+	/*
+	 * Increase the active count on the device to avoid going into power
+	 * saving modes while events are pending
+	 */
+
+	device->active_cnt++;
+
 	queue_work(device->work_queue, &device->ts_expired_ws);
 	return 0;
 }
@@ -137,12 +150,16 @@
 		 * system got before the event was canceled
 		 */
 
+		trace_kgsl_fire_event(id, cur, jiffies - event->created);
+
 		if (event->func)
 			event->func(device, event->priv, id, cur);
 
 		kgsl_context_put(context);
 		list_del(&event->list);
 		kfree(event);
+
+		kgsl_active_count_put(device);
 	}
 
 	/* Remove ourselves from the master pending list */
@@ -175,6 +192,10 @@
 		 * the callback knows how far the GPU made it before things went
 		 * wrong
 		 */
+
+		trace_kgsl_fire_event(KGSL_MEMSTORE_GLOBAL, cur,
+			jiffies - event->created);
+
 		if (event->func)
 			event->func(device, event->priv, KGSL_MEMSTORE_GLOBAL,
 				cur);
@@ -184,6 +205,8 @@
 
 		list_del(&event->list);
 		kfree(event);
+
+		kgsl_active_count_put(device);
 	}
 }
 EXPORT_SYMBOL(kgsl_cancel_events);
@@ -207,6 +230,9 @@
 		 * to the timestamp they wanted
 		 */
 
+		trace_kgsl_fire_event(id, event->timestamp,
+			jiffies - event->created);
+
 		if (event->func)
 			event->func(device, event->priv, id, event->timestamp);
 
@@ -215,6 +241,8 @@
 
 		list_del(&event->list);
 		kfree(event);
+
+		kgsl_active_count_put(device);
 	}
 }
 
diff --git a/drivers/gpu/msm/kgsl_pwrctrl.c b/drivers/gpu/msm/kgsl_pwrctrl.c
index 8078316..0dcbfdf 100644
--- a/drivers/gpu/msm/kgsl_pwrctrl.c
+++ b/drivers/gpu/msm/kgsl_pwrctrl.c
@@ -766,23 +766,23 @@
 			/* High latency clock maintenance. */
 			if ((pwr->pwrlevels[0].gpu_freq > 0) &&
 				(requested_state != KGSL_STATE_NAP)) {
-				clk_set_rate(pwr->grp_clks[0],
-					pwr->pwrlevels[pwr->num_pwrlevels - 1].
-					gpu_freq);
 				for (i = KGSL_MAX_CLKS - 1; i > 0; i--)
 					if (pwr->grp_clks[i])
 						clk_unprepare(pwr->grp_clks[i]);
+				clk_set_rate(pwr->grp_clks[0],
+					pwr->pwrlevels[pwr->num_pwrlevels - 1].
+					gpu_freq);
 			}
 			kgsl_pwrctrl_busy_time(device, true);
 		} else if (requested_state == KGSL_STATE_SLEEP) {
 			/* High latency clock maintenance. */
+			for (i = KGSL_MAX_CLKS - 1; i > 0; i--)
+				if (pwr->grp_clks[i])
+					clk_unprepare(pwr->grp_clks[i]);
 			if ((pwr->pwrlevels[0].gpu_freq > 0))
 				clk_set_rate(pwr->grp_clks[0],
 					pwr->pwrlevels[pwr->num_pwrlevels - 1].
 					gpu_freq);
-			for (i = KGSL_MAX_CLKS - 1; i > 0; i--)
-				if (pwr->grp_clks[i])
-					clk_unprepare(pwr->grp_clks[i]);
 		}
 	} else if (state == KGSL_PWRFLAGS_ON) {
 		if (!test_and_set_bit(KGSL_PWRFLAGS_CLK_ON,
@@ -790,15 +790,14 @@
 			trace_kgsl_clk(device, state);
 			/* High latency clock maintenance. */
 			if (device->state != KGSL_STATE_NAP) {
-				for (i = KGSL_MAX_CLKS - 1; i > 0; i--)
-					if (pwr->grp_clks[i])
-						clk_prepare(pwr->grp_clks[i]);
-
 				if (pwr->pwrlevels[0].gpu_freq > 0)
 					clk_set_rate(pwr->grp_clks[0],
 						pwr->pwrlevels
 						[pwr->active_pwrlevel].
 						gpu_freq);
+				for (i = KGSL_MAX_CLKS - 1; i > 0; i--)
+					if (pwr->grp_clks[i])
+						clk_prepare(pwr->grp_clks[i]);
 			}
 			/* as last step, enable grp_clk
 			   this is to let GPU interrupt to come */
diff --git a/drivers/gpu/msm/kgsl_trace.h b/drivers/gpu/msm/kgsl_trace.h
index f7818bb..8c4811e 100644
--- a/drivers/gpu/msm/kgsl_trace.h
+++ b/drivers/gpu/msm/kgsl_trace.h
@@ -693,6 +693,41 @@
 	)
 );
 
+TRACE_EVENT(kgsl_register_event,
+		TP_PROTO(unsigned int id, unsigned int timestamp),
+		TP_ARGS(id, timestamp),
+		TP_STRUCT__entry(
+			__field(unsigned int, id)
+			__field(unsigned int, timestamp)
+		),
+		TP_fast_assign(
+			__entry->id = id;
+			__entry->timestamp = timestamp;
+		),
+		TP_printk(
+			"ctx=%d ts=%d",
+			__entry->id, __entry->timestamp)
+);
+
+TRACE_EVENT(kgsl_fire_event,
+		TP_PROTO(unsigned int id, unsigned int ts,
+			unsigned int age),
+		TP_ARGS(id, ts, age),
+		TP_STRUCT__entry(
+			__field(unsigned int, id)
+			__field(unsigned int, ts)
+			__field(unsigned int, age)
+		),
+		TP_fast_assign(
+			__entry->id = id;
+			__entry->ts = ts;
+			__entry->age = age;
+		),
+		TP_printk(
+			"ctx=%d ts=%d age=%u",
+			__entry->id, __entry->ts, __entry->age)
+);
+
 #endif /* _KGSL_TRACE_H */
 
 /* This part must be outside protection */
diff --git a/drivers/gud/mobicore_driver/main.c b/drivers/gud/mobicore_driver/main.c
index 3fc9e17..df5675e 100644
--- a/drivers/gud/mobicore_driver/main.c
+++ b/drivers/gud/mobicore_driver/main.c
@@ -1112,7 +1112,7 @@
 static struct miscdevice mc_admin_device = {
 	.name	= MC_ADMIN_DEVNODE,
 	.mode	= (S_IRWXU),
-	.minor	= MISC_DYNAMIC_MINOR,
+	.minor	= 253,
 	.fops	= &mc_admin_fops,
 };
 
@@ -1128,7 +1128,7 @@
 static struct miscdevice mc_user_device = {
 	.name	= MC_USER_DEVNODE,
 	.mode	= (S_IRWXU | S_IRWXG | S_IRWXO),
-	.minor	= MISC_DYNAMIC_MINOR,
+	.minor	= 254,
 	.fops	= &mc_user_fops,
 };
 
diff --git a/drivers/hid/Kconfig b/drivers/hid/Kconfig
index ffddcba..1283fa3 100644
--- a/drivers/hid/Kconfig
+++ b/drivers/hid/Kconfig
@@ -55,6 +55,27 @@
 
 	If unsure, say Y.
 
+config UHID
+	tristate "User-space I/O driver support for HID subsystem"
+	depends on HID
+	default n
+	---help---
+	Say Y here if you want to provide HID I/O Drivers from user-space.
+	This allows you to write I/O drivers in user-space and feed the data from
+	the device into the kernel. The kernel parses the HID reports, loads the
+	corresponding HID Device Driver or provides input devices on top of your
+	user-space device.
+
+	This driver cannot be used to parse HID-reports in user-space and write
+	special HID-drivers. You should use hidraw for that.
+	Instead, this driver allows you to write the transport-layer driver in
+	user-space like USB-HID and Bluetooth-HID do in kernel-space.
+
+	If unsure, say N.
+
+	To compile this driver as a module, choose M here: the
+	module will be called uhid.
+
 source "drivers/hid/usbhid/Kconfig"
 
 menu "Special HID drivers"
diff --git a/drivers/hid/Makefile b/drivers/hid/Makefile
index 22f1d16..9dca845 100644
--- a/drivers/hid/Makefile
+++ b/drivers/hid/Makefile
@@ -8,6 +8,7 @@
 endif
 
 obj-$(CONFIG_HID)		+= hid.o
+obj-$(CONFIG_UHID)		+= uhid.o
 
 hid-$(CONFIG_HIDRAW)		+= hidraw.o
 
diff --git a/drivers/hid/uhid.c b/drivers/hid/uhid.c
new file mode 100644
index 0000000..05ef4b0
--- /dev/null
+++ b/drivers/hid/uhid.c
@@ -0,0 +1,153 @@
+/*
+ * User-space I/O driver support for HID subsystem
+ * Copyright (c) 2012 David Herrmann
+ */
+
+/*
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ */
+
+#include <linux/atomic.h>
+#include <linux/device.h>
+#include <linux/fs.h>
+#include <linux/hid.h>
+#include <linux/input.h>
+#include <linux/miscdevice.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/poll.h>
+#include <linux/sched.h>
+#include <linux/spinlock.h>
+#include <linux/uhid.h>
+#include <linux/wait.h>
+
+#define UHID_NAME	"uhid"
+#define UHID_BUFSIZE	32
+
+struct uhid_device {
+	struct hid_device *hid;
+
+	wait_queue_head_t waitq;
+	spinlock_t qlock;
+	__u8 head;
+	__u8 tail;
+	struct uhid_event *outq[UHID_BUFSIZE];
+};
+
+static struct miscdevice uhid_misc;
+
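+/*
+ * The outq ring buffer keeps one slot unused so that head == tail means
+ * "empty"; at most UHID_BUFSIZE - 1 (31) events can therefore be queued
+ * before uhid_queue() starts dropping them.
+ */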
+static void uhid_queue(struct uhid_device *uhid, struct uhid_event *ev)
+{
+	__u8 newhead;
+
+	newhead = (uhid->head + 1) % UHID_BUFSIZE;
+
+	if (newhead != uhid->tail) {
+		uhid->outq[uhid->head] = ev;
+		uhid->head = newhead;
+		wake_up_interruptible(&uhid->waitq);
+	} else {
+		hid_warn(uhid->hid, "Output queue is full\n");
+		kfree(ev);
+	}
+}
+
+static int uhid_queue_event(struct uhid_device *uhid, __u32 event)
+{
+	unsigned long flags;
+	struct uhid_event *ev;
+
+	ev = kzalloc(sizeof(*ev), GFP_KERNEL);
+	if (!ev)
+		return -ENOMEM;
+
+	ev->type = event;
+
+	spin_lock_irqsave(&uhid->qlock, flags);
+	uhid_queue(uhid, ev);
+	spin_unlock_irqrestore(&uhid->qlock, flags);
+
+	return 0;
+}
+
+static int uhid_char_open(struct inode *inode, struct file *file)
+{
+	struct uhid_device *uhid;
+
+	uhid = kzalloc(sizeof(*uhid), GFP_KERNEL);
+	if (!uhid)
+		return -ENOMEM;
+
+	spin_lock_init(&uhid->qlock);
+	init_waitqueue_head(&uhid->waitq);
+
+	file->private_data = uhid;
+	nonseekable_open(inode, file);
+
+	return 0;
+}
+
+static int uhid_char_release(struct inode *inode, struct file *file)
+{
+	struct uhid_device *uhid = file->private_data;
+	unsigned int i;
+
+	for (i = 0; i < UHID_BUFSIZE; ++i)
+		kfree(uhid->outq[i]);
+
+	kfree(uhid);
+
+	return 0;
+}
+
+static ssize_t uhid_char_read(struct file *file, char __user *buffer,
+				size_t count, loff_t *ppos)
+{
+	return 0;
+}
+
+static ssize_t uhid_char_write(struct file *file, const char __user *buffer,
+				size_t count, loff_t *ppos)
+{
+	return 0;
+}
+
+static unsigned int uhid_char_poll(struct file *file, poll_table *wait)
+{
+	return 0;
+}
+
+static const struct file_operations uhid_fops = {
+	.owner		= THIS_MODULE,
+	.open		= uhid_char_open,
+	.release	= uhid_char_release,
+	.read		= uhid_char_read,
+	.write		= uhid_char_write,
+	.poll		= uhid_char_poll,
+	.llseek		= no_llseek,
+};
+
+static struct miscdevice uhid_misc = {
+	.fops		= &uhid_fops,
+	.minor		= MISC_DYNAMIC_MINOR,
+	.name		= UHID_NAME,
+};
+
+static int __init uhid_init(void)
+{
+	return misc_register(&uhid_misc);
+}
+
+static void __exit uhid_exit(void)
+{
+	misc_deregister(&uhid_misc);
+}
+
+module_init(uhid_init);
+module_exit(uhid_exit);
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("David Herrmann <dh.herrmann@gmail.com>");
+MODULE_DESCRIPTION("User-space I/O driver support for HID subsystem");
diff --git a/drivers/hwmon/qpnp-adc-current.c b/drivers/hwmon/qpnp-adc-current.c
index e4a9e30..2017c8d 100644
--- a/drivers/hwmon/qpnp-adc-current.c
+++ b/drivers/hwmon/qpnp-adc-current.c
@@ -391,10 +391,13 @@
 	struct qpnp_iadc_drv *iadc = qpnp_iadc;
 	uint32_t num = 0;
 
-	num = iadc->adc->calib.offset_raw - iadc->adc->calib.offset_raw;
+	if ((iadc->adc->calib.gain_raw - iadc->adc->calib.offset_raw) == 0) {
+		pr_err("raw offset errors! raw_gain:0x%x and raw_offset:0x%x\n",
+			iadc->adc->calib.gain_raw, iadc->adc->calib.offset_raw);
+		return -EINVAL;
+	}
 
-	iadc->adc->calib.offset_uv = (num * QPNP_ADC_GAIN_NV)/
-		(iadc->adc->calib.gain_raw - iadc->adc->calib.offset_raw);
+	iadc->adc->calib.offset_uv = 0;
 
 	num = iadc->adc->calib.gain_raw - iadc->adc->calib.offset_raw;
 
@@ -437,6 +440,10 @@
 	}
 
 	rc = qpnp_convert_raw_offset_voltage();
+	if (rc < 0) {
+		pr_err("qpnp raw_voltage conversion failed\n");
+		goto fail;
+	}
 
 	rslt_msb = (raw_data & QPNP_RAW_CODE_16_BIT_MSB_MASK) >>
 							QPNP_BIT_SHIFT_8;
@@ -558,7 +565,7 @@
 {
 	struct qpnp_iadc_drv *iadc = qpnp_iadc;
 	struct qpnp_vadc_result result_pmic_therm;
-	int rc;
+	int rc = 0;
 
 	rc = qpnp_vadc_read(DIE_TEMP, &result_pmic_therm);
 	if (rc < 0)
@@ -572,7 +579,7 @@
 			pr_err("periodic IADC calibration failed\n");
 	}
 
-	return 0;
+	return rc;
 }
 
 int32_t qpnp_iadc_read(enum qpnp_iadc_channels channel,
diff --git a/drivers/i2c/busses/i2c-qup.c b/drivers/i2c/busses/i2c-qup.c
index da43a08..ca0a439 100644
--- a/drivers/i2c/busses/i2c-qup.c
+++ b/drivers/i2c/busses/i2c-qup.c
@@ -129,6 +129,8 @@
 
 #define QUP_MAX_CLK_STATE_RETRIES	300
 #define DEFAULT_CLK_RATE		(19200000)
+#define I2C_STATUS_CLK_STATE		13
+#define QUP_OUT_FIFO_NOT_EMPTY		0x10
 
 static char const * const i2c_rsrcs[] = {"i2c_clk", "i2c_sda"};
 
@@ -380,6 +382,7 @@
 static int qup_i2c_poll_clock_ready(struct qup_i2c_dev *dev)
 {
 	uint32_t retries = 0;
+	uint32_t op_flgs = -1, clk_state = -1;
 
 	/*
 	 * Wait for the clock state to transition to either IDLE or FORCED
@@ -388,16 +391,32 @@
 
 	while (retries++ < QUP_MAX_CLK_STATE_RETRIES) {
 		uint32_t status = readl_relaxed(dev->base + QUP_I2C_STATUS);
-		uint32_t clk_state = (status >> 13) & 0x7;
+		clk_state = (status >> I2C_STATUS_CLK_STATE) & 0x7;
+		/* Read the operational register */
+		op_flgs = readl_relaxed(dev->base +
+			QUP_OPERATIONAL) & QUP_OUT_FIFO_NOT_EMPTY;
 
-		if (clk_state == I2C_CLK_RESET_BUSIDLE_STATE ||
-				clk_state == I2C_CLK_FORCED_LOW_STATE)
+		/*
+		 * In very corner case when slave do clock stretching and
+		 * In a corner case the slave may stretch the clock while the
+		 * output FIFO has exactly one free block of space.  The QUP
+		 * then raises an output service interrupt even though there
+		 * is no more data to write, which can leave the output FIFO
+		 * non-empty; so only report the clock as ready once the FIFO
+		 * has drained as well.
+		 */
+			(clk_state == I2C_CLK_RESET_BUSIDLE_STATE ||
+			clk_state == I2C_CLK_FORCED_LOW_STATE)) {
+			dev_dbg(dev->dev, "clk_state 0x%x op_flgs [%x]\n",
+				clk_state, op_flgs);
 			return 0;
+		}
+
 		/* 1-bit delay before we check again */
 		udelay(dev->one_bit_t);
 	}
 
-	dev_err(dev->dev, "Error waiting for clk ready\n");
+	dev_err(dev->dev, "Error waiting for clk ready clk_state: 0x%x op_flgs: 0x%x\n",
+		clk_state, op_flgs);
 	return -ETIMEDOUT;
 }
 
@@ -675,7 +694,7 @@
 	int gpio_dat;
 	bool gpio_clk_status = false;
 	uint32_t status = readl_relaxed(dev->base + QUP_I2C_STATUS);
-	struct gpiomux_setting old_gpio_setting;
+	struct gpiomux_setting old_gpio_setting[ARRAY_SIZE(i2c_rsrcs)];
 
 	if (dev->pdata->msm_i2c_config_gpio)
 		return;
@@ -695,7 +714,7 @@
 	disable_irq(dev->err_irq);
 	for (i = 0; i < ARRAY_SIZE(i2c_rsrcs); ++i) {
 		if (msm_gpiomux_write(dev->i2c_gpios[i], GPIOMUX_ACTIVE,
-				&recovery_config, &old_gpio_setting)) {
+				&recovery_config, &old_gpio_setting[i])) {
 			dev_err(dev->dev, "GPIO pins have no active setting\n");
 			goto recovery_end;
 		}
@@ -725,7 +744,7 @@
 	/* Configure ALT function to QUP I2C */
 	for (i = 0; i < ARRAY_SIZE(i2c_rsrcs); ++i) {
 		msm_gpiomux_write(dev->i2c_gpios[i], GPIOMUX_ACTIVE,
-				&old_gpio_setting, NULL);
+				&old_gpio_setting[i], NULL);
 	}
 
 	udelay(10);
diff --git a/drivers/input/keyboard/Kconfig b/drivers/input/keyboard/Kconfig
index badbc2b..4c72b65 100644
--- a/drivers/input/keyboard/Kconfig
+++ b/drivers/input/keyboard/Kconfig
@@ -447,6 +447,17 @@
 	  To compile this driver as a module, choose M here: the module will
 	  be called pmic8xxx-keypad.
 
+config KEYBOARD_QPNP
+	tristate "Qualcomm QPNP PMIC keypad support"
+	depends on OF_SPMI && SPMI && MSM_QPNP_INT
+	help
+	  Say Y here if you want to enable the driver for the QPNP PMIC
+	  keypad provided as a reference design from Qualcomm. This is intended
+	  to support up to a 10 x 8 matrix-based keypad design.
+
+	  To compile this driver as a module, choose M here: the module will
+	  be called qpnp-keypad.
+
 config KEYBOARD_SAMSUNG
 	tristate "Samsung keypad support"
 	depends on HAVE_CLK
diff --git a/drivers/input/keyboard/Makefile b/drivers/input/keyboard/Makefile
index 61b57ef..833904a 100644
--- a/drivers/input/keyboard/Makefile
+++ b/drivers/input/keyboard/Makefile
@@ -36,6 +36,7 @@
 obj-$(CONFIG_KEYBOARD_OMAP4)		+= omap4-keypad.o
 obj-$(CONFIG_KEYBOARD_OPENCORES)	+= opencores-kbd.o
 obj-$(CONFIG_KEYBOARD_PMIC8XXX)		+= pmic8xxx-keypad.o
+obj-$(CONFIG_KEYBOARD_QPNP)		+= qpnp-keypad.o
 obj-$(CONFIG_KEYBOARD_PXA27x)		+= pxa27x_keypad.o
 obj-$(CONFIG_KEYBOARD_PXA930_ROTARY)	+= pxa930_rotary.o
 obj-$(CONFIG_KEYBOARD_QT1070)           += qt1070.o
diff --git a/drivers/input/keyboard/qpnp-keypad.c b/drivers/input/keyboard/qpnp-keypad.c
new file mode 100644
index 0000000..a46e3b5
--- /dev/null
+++ b/drivers/input/keyboard/qpnp-keypad.c
@@ -0,0 +1,852 @@
+/* Copyright (c) 2013, The Linux Foundation. All rights reserved.
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/interrupt.h>
+#include <linux/slab.h>
+#include <linux/input.h>
+#include <linux/bitops.h>
+#include <linux/delay.h>
+#include <linux/mutex.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/input/matrix_keypad.h>
+#include <linux/spmi.h>
+
+#define QPNP_MAX_ROWS			10
+#define QPNP_MAX_COLS			8
+#define QPNP_MIN_ROWS			2
+#define QPNP_MIN_COLS			1
+#define QPNP_ROW_SHIFT			3
+#define QPNP_MATRIX_MAX_SIZE		(QPNP_MAX_ROWS * QPNP_MAX_COLS)
+
+/* in ms */
+#define MAX_SCAN_DELAY			128
+#define MIN_SCAN_DELAY			1
+#define KEYP_DEFAULT_SCAN_DELAY		32
+
+/* in ns */
+#define MAX_ROW_HOLD_DELAY		250000
+#define MIN_ROW_HOLD_DELAY		31250
+
+/* in ms */
+#define MAX_DEBOUNCE_TIME		20
+#define MIN_DEBOUNCE_TIME		5
+#define KEYP_DEFAULT_DEBOUNCE		15
+
+/* register offsets */
+#define KEYP_STATUS(base)		(base + 0x08)
+#define KEYP_SIZE_CTRL(base)		(base + 0x40)
+#define KEYP_SCAN_CTRL(base)		(base + 0x42)
+#define KEYP_FSM_CNTL(base)		(base + 0x44)
+#define KEYP_EN_CTRL(base)		(base + 0x46)
+
+#define KEYP_CTRL_KEYP_EN		BIT(7)
+#define KEYP_CTRL_EVNTS			BIT(0)
+#define KEYP_CTRL_EVNTS_MASK		0x3
+
+#define KEYP_SIZE_COLS_SHIFT		4
+#define KEYP_SIZE_COLS_MASK		0x70
+#define KEYP_SIZE_ROWS_MASK		0x0F
+
+#define KEYP_SCAN_DBC_MASK		0x03
+#define KEYP_SCAN_SCNP_MASK		0x38
+#define KEYP_SCAN_ROWP_MASK		0xC0
+#define KEYP_SCAN_SCNP_SHIFT		3
+#define KEYP_SCAN_ROWP_SHIFT		6
+
+#define KEYP_CTRL_SCAN_ROWS_BITS	0x7
+
+#define KEYP_SCAN_DBOUNCE_SHIFT		1
+#define KEYP_SCAN_PAUSE_SHIFT		3
+#define KEYP_SCAN_ROW_HOLD_SHIFT	6
+
+#define KEYP_FSM_READ_EN		BIT(0)
+
+/* bits of these registers represent
+ * '0' for key press
+ * '1' for key release
+ */
+#define KEYP_RECENT_DATA(base)		(base + 0x7C)
+#define KEYP_OLD_DATA(base)		(base + 0x5C)
+
+#define KEYP_CLOCK_FREQ			32768
+
+struct qpnp_kp {
+	const struct matrix_keymap_data *keymap_data;
+	struct input_dev *input;
+	struct spmi_device *spmi;
+
+	int key_sense_irq;
+	int key_stuck_irq;
+	u16 base;
+
+	u32 num_rows;
+	u32 num_cols;
+	u32 debounce_ms;
+	u32 row_hold_ns;
+	u32 scan_delay_ms;
+	bool wakeup;
+	bool rep;
+
+	unsigned short keycodes[QPNP_MATRIX_MAX_SIZE];
+
+	u16 keystate[QPNP_MAX_ROWS];
+	u16 stuckstate[QPNP_MAX_ROWS];
+};
+
+static int qpnp_kp_write_u8(struct qpnp_kp *kp, u8 data, u16 reg)
+{
+	int rc;
+
+	rc = spmi_ext_register_writel(kp->spmi->ctrl, kp->spmi->sid,
+							reg, &data, 1);
+	if (rc < 0)
+		dev_err(&kp->spmi->dev,
+			"Error writing to address: %X - ret %d\n", reg, rc);
+
+	return rc;
+}
+
+static int qpnp_kp_read(struct qpnp_kp *kp,
+				u8 *data, u16 reg, unsigned num_bytes)
+{
+	int rc;
+
+	rc = spmi_ext_register_readl(kp->spmi->ctrl, kp->spmi->sid,
+						reg, data, num_bytes);
+	if (rc < 0)
+		dev_err(&kp->spmi->dev,
+			"Error reading from address : %X - ret %d\n", reg, rc);
+
+	return rc;
+}
+
+static int qpnp_kp_read_u8(struct qpnp_kp *kp, u8 *data, u16 reg)
+{
+	int rc;
+
+	rc = qpnp_kp_read(kp, data, reg, 1);
+	if (rc < 0)
+		dev_err(&kp->spmi->dev, "Error reading qpnp: %X - ret %d\n",
+				reg, rc);
+	return rc;
+}
+
+static u8 qpnp_col_state(struct qpnp_kp *kp, u8 col)
+{
+	/* all keys pressed on that particular row? */
+	if (col == 0x00)
+		return 1 << kp->num_cols;
+	else
+		return col & ((1 << kp->num_cols) - 1);
+}
+
+/*
+ * Synchronous read protocol
+ *
+ * 1. Write '1' to ReadState bit in KEYP_FSM_CNTL register
+ * 2. Wait 2*32KHz clocks, so that HW can successfully enter read mode
+ *    synchronously
+ * 3. Read rows in the old array first if there is more than one event
+ * 4. Read rows in recent array
+ * 5. Wait 4*32KHz clocks
+ * 6. Write '0' to ReadState bit of KEYP_FSM_CNTL register so that hw can
+ *    synchronously exit read mode.
+ */
+static int qpnp_sync_read(struct qpnp_kp *kp, bool enable)
+{
+	int rc;
+	u8 fsm_ctl;
+
+	rc = qpnp_kp_read_u8(kp, &fsm_ctl, KEYP_FSM_CNTL(kp->base));
+	if (rc < 0) {
+		dev_err(&kp->spmi->dev,
+				"Error reading KEYP_FSM_CNTL reg, rc=%d\n", rc);
+		return rc;
+	}
+
+	if (enable)
+		fsm_ctl |= KEYP_FSM_READ_EN;
+	else
+		fsm_ctl &= ~KEYP_FSM_READ_EN;
+
+	rc = qpnp_kp_write_u8(kp, fsm_ctl, KEYP_FSM_CNTL(kp->base));
+	if (rc < 0) {
+		dev_err(&kp->spmi->dev,
+				"Error writing KEYP_FSM_CNTL reg, rc=%d\n", rc);
+		return rc;
+	}
+
+	/* 2 * 32KHz clocks */
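+	/* DIV_ROUND_UP(USEC_PER_SEC, KEYP_CLOCK_FREQ) = 31, so this is ~63 us */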
+	udelay((2 * DIV_ROUND_UP(USEC_PER_SEC, KEYP_CLOCK_FREQ)) + 1);
+
+	return rc;
+}
+
+static int qpnp_kp_read_data(struct qpnp_kp *kp, u16 *state,
+					u16 data_reg, int read_rows)
+{
+	int rc, row;
+	u8 new_data[QPNP_MAX_ROWS];
+
+	/*
+	 * If the last hardware row is not part of the configured matrix,
+	 * read it anyway so the key event counter gets cleared.
+	 */
+	if (kp->num_rows < QPNP_MAX_ROWS) {
+		rc = qpnp_kp_read_u8(kp, &new_data[QPNP_MAX_ROWS - 1],
+					data_reg + (QPNP_MAX_ROWS - 1) * 2);
+		if (rc)
+			return rc;
+	}
+
+	for (row = 0; row < kp->num_rows; row++) {
+		rc = qpnp_kp_read_u8(kp, &new_data[row], data_reg + row * 2);
+		if (rc)
+			return rc;
+
+		dev_dbg(&kp->spmi->dev, "new_data[%d] = %d\n", row,
+					new_data[row]);
+		state[row] = qpnp_col_state(kp, new_data[row]);
+	}
+
+	return 0;
+}
+
+static int qpnp_kp_read_matrix(struct qpnp_kp *kp, u16 *new_state,
+					 u16 *old_state)
+{
+	int rc, read_rows;
+
+	read_rows = kp->num_rows;
+
+	rc = qpnp_sync_read(kp, true);
+	if (rc < 0) {
+		dev_err(&kp->spmi->dev,
+			"Error setting the FSM read enable bit rc=%d\n", rc);
+		return rc;
+	}
+
+	if (old_state) {
+		rc = qpnp_kp_read_data(kp, old_state, KEYP_OLD_DATA(kp->base),
+							read_rows);
+		if (rc < 0) {
+			dev_err(&kp->spmi->dev,
+				"Error reading KEYP_OLD_DATA, rc=%d\n", rc);
+			return rc;
+		}
+	}
+
+	rc = qpnp_kp_read_data(kp, new_state, KEYP_RECENT_DATA(kp->base),
+						 read_rows);
+	if (rc < 0) {
+		dev_err(&kp->spmi->dev,
+			"Error reading KEYP_RECENT_DATA, rc=%d\n", rc);
+		return rc;
+	}
+
+	/* 4 * 32KHz clocks */
+	udelay((4 * DIV_ROUND_UP(USEC_PER_SEC, KEYP_CLOCK_FREQ)) + 1);
+
+	rc = qpnp_sync_read(kp, false);
+	if (rc < 0) {
+		dev_err(&kp->spmi->dev,
+			"Error resetting the FSM read enable bit rc=%d\n", rc);
+		return rc;
+	}
+
+	return rc;
+}
+
+static void __qpnp_kp_scan_matrix(struct qpnp_kp *kp, u16 *new_state,
+					 u16 *old_state)
+{
+	int row, col, code;
+
+	for (row = 0; row < kp->num_rows; row++) {
+		int bits_changed = new_state[row] ^ old_state[row];
+
+		if (!bits_changed)
+			continue;
+
+		for (col = 0; col < kp->num_cols; col++) {
+			if (!(bits_changed & (1 << col)))
+				continue;
+
+			dev_dbg(&kp->spmi->dev, "key [%d:%d] %s\n", row, col,
+					!(new_state[row] & (1 << col)) ?
+					"pressed" : "released");
+			code = MATRIX_SCAN_CODE(row, col, QPNP_ROW_SHIFT);
+			input_event(kp->input, EV_MSC, MSC_SCAN, code);
+			input_report_key(kp->input,
+					kp->keycodes[code],
+					!(new_state[row] & (1 << col)));
+			input_sync(kp->input);
+		}
+	}
+}
+
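+/*
+ * Scan-code layout used by __qpnp_kp_scan_matrix() above:
+ * MATRIX_SCAN_CODE(row, col, QPNP_ROW_SHIFT) from
+ * <linux/input/matrix_keypad.h> expands to (row << 3) + col here, so a key
+ * change on row 2, column 5 is reported as scan code 21 and looked up in
+ * keycodes[21].
+ */
+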
+static bool qpnp_detect_ghost_keys(struct qpnp_kp *kp, u16 *new_state)
+{
+	int row, found_first = -1;
+	u16 check, row_state;
+
+	check = 0;
+	for (row = 0; row < kp->num_rows; row++) {
+		row_state = (~new_state[row]) &
+				 ((1 << kp->num_cols) - 1);
+
+		if (hweight16(row_state) > 1) {
+			if (found_first == -1)
+				found_first = row;
+			if (check & row_state) {
+				dev_dbg(&kp->spmi->dev,
+					"detected ghost key row[%d],row[%d]\n",
+					found_first, row);
+				return true;
+			}
+		}
+		check |= row_state;
+	}
+	return false;
+}
+
+static int qpnp_kp_scan_matrix(struct qpnp_kp *kp, unsigned int events)
+{
+	u16 new_state[QPNP_MAX_ROWS];
+	u16 old_state[QPNP_MAX_ROWS];
+	int rc;
+	switch (events) {
+	case 0x1:
+		rc = qpnp_kp_read_matrix(kp, new_state, NULL);
+		if (rc < 0)
+			return rc;
+
+		/* detecting a ghost key is not an error */
+		if (qpnp_detect_ghost_keys(kp, new_state))
+			return 0;
+		__qpnp_kp_scan_matrix(kp, new_state, kp->keystate);
+		memcpy(kp->keystate, new_state, sizeof(new_state));
+	break;
+	case 0x3: /* two events - the event counter is gray-coded */
+		rc = qpnp_kp_read_matrix(kp, new_state, old_state);
+		if (rc < 0)
+			return rc;
+
+		__qpnp_kp_scan_matrix(kp, old_state, kp->keystate);
+		__qpnp_kp_scan_matrix(kp, new_state, old_state);
+		memcpy(kp->keystate, new_state, sizeof(new_state));
+	break;
+	case 0x2:
+		dev_dbg(&kp->spmi->dev, "Some key events were lost\n");
+		rc = qpnp_kp_read_matrix(kp, new_state, old_state);
+		if (rc < 0)
+			return rc;
+		__qpnp_kp_scan_matrix(kp, old_state, kp->keystate);
+		__qpnp_kp_scan_matrix(kp, new_state, old_state);
+		memcpy(kp->keystate, new_state, sizeof(new_state));
+	break;
+	default:
+		rc = -EINVAL;
+	}
+	return rc;
+}
+
+/*
+ * NOTE: We read the recent and old data registers blindly whenever the
+ * key-stuck interrupt fires, because the event counter is not updated for
+ * this interrupt: a stuck key is not treated as a key state change.
+ *
+ * The old data register contents are not used after being read, because a
+ * key that was pressed before the key became stuck could then be reported
+ * as the stuck key, since its pressed status is stored in the old data
+ * register.
+ */
+static irqreturn_t qpnp_kp_stuck_irq(int irq, void *data)
+{
+	u16 new_state[QPNP_MAX_ROWS];
+	u16 old_state[QPNP_MAX_ROWS];
+	int rc;
+	struct qpnp_kp *kp = data;
+
+	rc = qpnp_kp_read_matrix(kp, new_state, old_state);
+	if (rc < 0) {
+		dev_err(&kp->spmi->dev, "failed to read keypad matrix\n");
+		return IRQ_HANDLED;
+	}
+
+	__qpnp_kp_scan_matrix(kp, new_state, kp->stuckstate);
+
+	return IRQ_HANDLED;
+}
+
+static irqreturn_t qpnp_kp_irq(int irq, void *data)
+{
+	struct qpnp_kp *kp = data;
+	u8 ctrl_val, events;
+	int rc;
+
+	rc = qpnp_kp_read_u8(kp, &ctrl_val, KEYP_STATUS(kp->base));
+	if (rc < 0) {
+		dev_err(&kp->spmi->dev,
+			"Error reading KEYP_STATUS register\n");
+		return IRQ_HANDLED;
+	}
+
+	events = ctrl_val & KEYP_CTRL_EVNTS_MASK;
+
+	rc = qpnp_kp_scan_matrix(kp, events);
+	if (rc < 0)
+		dev_err(&kp->spmi->dev, "failed to scan matrix\n");
+
+	return IRQ_HANDLED;
+}
+
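+/*
+ * Register encodings used below: the debounce field is programmed as
+ * (debounce_ms / 5) - 1, the scan-pause field as fls(scan_delay_ms) - 1
+ * (log2 for power-of-two delays), and the row-hold field as the log2 of
+ * the hold time converted to 32 kHz clock cycles.  For instance, a 15 ms
+ * debounce encodes as 2, a 32 ms scan delay as 5, and a hold time of four
+ * clock cycles as 2.
+ */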
+static int __devinit qpnp_kpd_init(struct qpnp_kp *kp)
+{
+	int bits, rc, cycles;
+	u8 kpd_scan_cntl, kpd_size_cntl;
+
+	/* Configure the SIZE register, #rows and #columns */
+	rc = qpnp_kp_read_u8(kp, &kpd_size_cntl, KEYP_SIZE_CTRL(kp->base));
+	if (rc < 0) {
+		dev_err(&kp->spmi->dev,
+			"Error reading KEYP_SIZE_CTRL reg, rc=%d\n", rc);
+		return rc;
+	}
+
+	kpd_size_cntl &= ~(KEYP_SIZE_COLS_MASK | KEYP_SIZE_ROWS_MASK);
+	kpd_size_cntl |= (((kp->num_cols - 1) << KEYP_SIZE_COLS_SHIFT) &
+							KEYP_SIZE_COLS_MASK);
+	kpd_size_cntl |= ((kp->num_rows - 1) & KEYP_SIZE_ROWS_MASK);
+
+	rc = qpnp_kp_write_u8(kp, kpd_size_cntl, KEYP_SIZE_CTRL(kp->base));
+	if (rc < 0) {
+		dev_err(&kp->spmi->dev,
+			"Error writing to KEYP_SIZE_CTRL reg, rc=%d\n", rc);
+		return rc;
+	}
+
+	/* Configure the SCAN CTL register, debounce, row pause, scan delay */
+	rc = qpnp_kp_read_u8(kp, &kpd_scan_cntl, KEYP_SCAN_CTRL(kp->base));
+	if (rc < 0) {
+		dev_err(&kp->spmi->dev,
+			"Error reading KEYP_SCAN_CTRL reg, rc=%d\n", rc);
+		return rc;
+	}
+
+	kpd_scan_cntl &= ~(KEYP_SCAN_DBC_MASK | KEYP_SCAN_SCNP_MASK |
+						KEYP_SCAN_ROWP_MASK);
+	kpd_scan_cntl |= (((kp->debounce_ms / 5) - 1) & KEYP_SCAN_DBC_MASK);
+
+	bits = fls(kp->scan_delay_ms) - 1;
+	kpd_scan_cntl |= ((bits << KEYP_SCAN_SCNP_SHIFT) & KEYP_SCAN_SCNP_MASK);
+
+	/* Row hold time is a multiple of 32KHz cycles. */
+	cycles = (kp->row_hold_ns * KEYP_CLOCK_FREQ) / NSEC_PER_SEC;
+	if (cycles)
+		cycles = ilog2(cycles);
+	kpd_scan_cntl |= ((cycles << KEYP_SCAN_ROW_HOLD_SHIFT) &
+							KEYP_SCAN_ROWP_MASK);
+
+	rc = qpnp_kp_write_u8(kp, kpd_scan_cntl, KEYP_SCAN_CTRL(kp->base));
+	if (rc)
+		dev_err(&kp->spmi->dev,
+			"Error writing KEYP_SCAN reg, rc=%d\n", rc);
+
+	return rc;
+}
+
+static int qpnp_kp_enable(struct qpnp_kp *kp)
+{
+	int rc;
+	u8 kpd_cntl;
+
+	rc = qpnp_kp_read_u8(kp, &kpd_cntl, KEYP_EN_CTRL(kp->base));
+	if (rc < 0) {
+		dev_err(&kp->spmi->dev,
+			"Error reading KEYP_EN_CTRL reg, rc=%d\n", rc);
+		return rc;
+	}
+
+	kpd_cntl |= KEYP_CTRL_KEYP_EN;
+
+	rc = qpnp_kp_write_u8(kp, kpd_cntl, KEYP_EN_CTRL(kp->base));
+	if (rc < 0)
+		dev_err(&kp->spmi->dev,
+			"Error writing KEYP_EN_CTRL reg, rc=%d\n", rc);
+
+	return rc;
+}
+
+static int qpnp_kp_disable(struct qpnp_kp *kp)
+{
+	int rc;
+	u8 kpd_cntl;
+
+	rc = qpnp_kp_read_u8(kp, &kpd_cntl, KEYP_EN_CTRL(kp->base));
+	if (rc < 0) {
+		dev_err(&kp->spmi->dev,
+			"Error reading KEYP_EN_CTRL reg, rc=%d\n", rc);
+		return rc;
+	}
+
+	kpd_cntl &= ~KEYP_CTRL_KEYP_EN;
+
+	rc = qpnp_kp_write_u8(kp, kpd_cntl, KEYP_EN_CTRL(kp->base));
+	if (rc < 0)
+		dev_err(&kp->spmi->dev,
+			"Error writing KEYP_EN_CTRL reg, rc=%d\n", rc);
+
+	return rc;
+}
+
+static int qpnp_kp_open(struct input_dev *dev)
+{
+	struct qpnp_kp *kp = input_get_drvdata(dev);
+
+	return qpnp_kp_enable(kp);
+}
+
+static void qpnp_kp_close(struct input_dev *dev)
+{
+	struct qpnp_kp *kp = input_get_drvdata(dev);
+
+	qpnp_kp_disable(kp);
+}
+
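+/*
+ * Device tree parsing.  The properties consumed below would typically
+ * appear in a node along these lines (all values illustrative):
+ *
+ *	qcom,keypad@148 {
+ *		compatible = "qcom,qpnp-keypad";
+ *		keypad,num-rows = <5>;
+ *		keypad,num-cols = <5>;
+ *		qcom,scan-delay-ms = <32>;
+ *		qcom,row-hold-ns = <122000>;
+ *		qcom,debounce-ms = <15>;
+ *		qcom,wakeup;
+ *		linux,keymap = <0x00000072	0x00010073>;
+ *	};
+ *
+ * Each linux,keymap cell packs (row << 24) | (column << 16) | keycode, so
+ * 0x00000072 maps row 0, column 0 to KEY_VOLUMEDOWN (0x72).
+ */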
+static int __devinit qpnp_keypad_parse_dt(struct qpnp_kp *kp)
+{
+	struct matrix_keymap_data *keymap_data;
+	int rc, keymap_len, i;
+	u32 *keymap;
+	const __be32 *map;
+
+	rc = of_property_read_u32(kp->spmi->dev.of_node,
+				"keypad,num-rows", &kp->num_rows);
+	if (rc) {
+		dev_err(&kp->spmi->dev, "Unable to parse 'num-rows'\n");
+		return rc;
+	}
+
+	rc = of_property_read_u32(kp->spmi->dev.of_node,
+				"keypad,num-cols", &kp->num_cols);
+	if (rc) {
+		dev_err(&kp->spmi->dev, "Unable to parse 'num-cols'\n");
+		return rc;
+	}
+
+	rc = of_property_read_u32(kp->spmi->dev.of_node,
+				"qcom,scan-delay-ms", &kp->scan_delay_ms);
+	if (rc && rc != -EINVAL) {
+		dev_err(&kp->spmi->dev, "Unable to parse 'scan-delay-ms'\n");
+		return rc;
+	}
+
+	rc = of_property_read_u32(kp->spmi->dev.of_node,
+				"qcom,row-hold-ns", &kp->row_hold_ns);
+	if (rc && rc != -EINVAL) {
+		dev_err(&kp->spmi->dev, "Unable to parse 'row-hold-ns'\n");
+		return rc;
+	}
+
+	rc = of_property_read_u32(kp->spmi->dev.of_node,
+					"qcom,debounce-ms", &kp->debounce_ms);
+	if (rc && rc != -EINVAL) {
+		dev_err(&kp->spmi->dev, "Unable to parse 'debounce-ms'\n");
+		return rc;
+	}
+
+	kp->wakeup = of_property_read_bool(kp->spmi->dev.of_node,
+							"qcom,wakeup");
+
+	kp->rep = !of_property_read_bool(kp->spmi->dev.of_node,
+					"linux,keypad-no-autorepeat");
+
+	map = of_get_property(kp->spmi->dev.of_node,
+					"linux,keymap", &keymap_len);
+	if (!map) {
+		dev_err(&kp->spmi->dev, "Keymap not specified\n");
+		return -EINVAL;
+	}
+
+	keymap_data = devm_kzalloc(&kp->spmi->dev,
+					sizeof(*keymap_data), GFP_KERNEL);
+	if (!keymap_data) {
+		dev_err(&kp->spmi->dev, "Unable to allocate memory\n");
+		return -ENOMEM;
+	}
+
+	keymap_data->keymap_size = keymap_len / sizeof(u32);
+
+	keymap = devm_kzalloc(&kp->spmi->dev,
+		sizeof(uint32_t) * keymap_data->keymap_size, GFP_KERNEL);
+	if (!keymap) {
+		dev_err(&kp->spmi->dev, "could not allocate memory for keymap\n");
+		return -ENOMEM;
+	}
+
+	for (i = 0; i < keymap_data->keymap_size; i++) {
+		unsigned int key = be32_to_cpup(map + i);
+		int keycode, row, col;
+
+		row = (key >> 24) & 0xff;
+		col = (key >> 16) & 0xff;
+		keycode = key & 0xffff;
+		keymap[i] = KEY(row, col, keycode);
+	}
+	keymap_data->keymap = keymap;
+	kp->keymap_data = keymap_data;
+
+	return 0;
+}
+
+static int __devinit qpnp_kp_probe(struct spmi_device *spmi)
+{
+	struct qpnp_kp *kp;
+	struct resource *keypad_base;
+	int rc = 0;
+
+	kp = devm_kzalloc(&spmi->dev, sizeof(struct qpnp_kp), GFP_KERNEL);
+	if (!kp) {
+		dev_err(&spmi->dev, "%s: Can't allocate qpnp_kp\n",
+			__func__);
+		return -ENOMEM;
+	}
+
+	kp->spmi = spmi;
+
+	rc = qpnp_keypad_parse_dt(kp);
+	if (rc < 0) {
+		dev_err(&spmi->dev, "Error parsing device tree\n");
+		return rc;
+	}
+
+	/* the #rows and #columns are compulsory */
+	if (!kp->num_cols || !kp->num_rows ||
+		kp->num_cols > QPNP_MAX_COLS ||
+		kp->num_rows > QPNP_MAX_ROWS ||
+		kp->num_cols < QPNP_MIN_COLS ||
+		kp->num_rows < QPNP_MIN_ROWS) {
+		dev_err(&spmi->dev, "invalid rows/cols input data\n");
+		return -EINVAL;
+	}
+
+	if (!kp->keymap_data) {
+		dev_err(&spmi->dev, "keymap not specified\n");
+		return -EINVAL;
+	}
+
+	/* the following parameters are optional */
+	if (!kp->scan_delay_ms) {
+		kp->scan_delay_ms = KEYP_DEFAULT_SCAN_DELAY;
+	} else {
+		if (kp->scan_delay_ms > MAX_SCAN_DELAY ||
+			kp->scan_delay_ms < MIN_SCAN_DELAY) {
+			dev_err(&spmi->dev,
+				"invalid keypad scan time supplied\n");
+			return -EINVAL;
+		}
+	}
+
+	if (!kp->row_hold_ns) {
+		kp->row_hold_ns = MIN_ROW_HOLD_DELAY;
+	} else {
+		if (kp->row_hold_ns > MAX_ROW_HOLD_DELAY ||
+			kp->row_hold_ns < MIN_ROW_HOLD_DELAY) {
+			dev_err(&spmi->dev,
+				"invalid keypad row hold time supplied\n");
+			return -EINVAL;
+		}
+	}
+
+	if (!kp->debounce_ms) {
+		kp->debounce_ms = KEYP_DEFAULT_DEBOUNCE;
+	} else {
+		if (kp->debounce_ms > MAX_DEBOUNCE_TIME ||
+			kp->debounce_ms < MIN_DEBOUNCE_TIME ||
+			(kp->debounce_ms % 5 != 0)) {
+			dev_err(&spmi->dev,
+				"invalid debounce time supplied\n");
+			return -EINVAL;
+		}
+	}
+
+	kp->input = input_allocate_device();
+	if (!kp->input) {
+		dev_err(&spmi->dev, "Can't allocate keypad input device\n");
+		return -ENOMEM;
+	}
+
+	kp->key_sense_irq = spmi_get_irq_byname(spmi, NULL, "kp-sense");
+	if (kp->key_sense_irq < 0) {
+		dev_err(&spmi->dev, "Unable to get keypad sense irq\n");
+		return kp->key_sense_irq;
+	}
+
+	kp->key_stuck_irq = spmi_get_irq_byname(spmi, NULL, "kp-stuck");
+	if (kp->key_stuck_irq < 0) {
+		dev_err(&spmi->dev, "Unable to get stuck irq\n");
+		return kp->key_stuck_irq;
+	}
+
+	keypad_base = spmi_get_resource(spmi, NULL, IORESOURCE_MEM, 0);
+	if (!keypad_base) {
+		dev_err(&spmi->dev, "Unable to get keypad base address\n");
+		return -ENXIO;
+	}
+	kp->base = keypad_base->start;
+
+	kp->input->name = "qpnp_keypad";
+	kp->input->phys = "qpnp_keypad/input0";
+	kp->input->id.version	= 0x0001;
+	kp->input->id.product	= 0x0001;
+	kp->input->id.vendor	= 0x0001;
+
+	kp->input->evbit[0]	= BIT_MASK(EV_KEY);
+
+	if (kp->rep)
+		set_bit(EV_REP, kp->input->evbit);
+
+	kp->input->keycode	= kp->keycodes;
+	kp->input->keycodemax	= QPNP_MATRIX_MAX_SIZE;
+	kp->input->keycodesize	= sizeof(kp->keycodes[0]);
+	kp->input->open		= qpnp_kp_open;
+	kp->input->close	= qpnp_kp_close;
+
+	matrix_keypad_build_keymap(kp->keymap_data, QPNP_ROW_SHIFT,
+					kp->keycodes, kp->input->keybit);
+
+	input_set_capability(kp->input, EV_MSC, MSC_SCAN);
+	input_set_drvdata(kp->input, kp);
+
+	/* initialize keypad state */
+	memset(kp->keystate, 0xff, sizeof(kp->keystate));
+	memset(kp->stuckstate, 0xff, sizeof(kp->stuckstate));
+
+	rc = qpnp_kpd_init(kp);
+	if (rc < 0) {
+		dev_err(&spmi->dev, "unable to initialize keypad controller\n");
+		return rc;
+	}
+
+	rc = input_register_device(kp->input);
+	if (rc < 0) {
+		dev_err(&spmi->dev, "unable to register keypad input device\n");
+		return rc;
+	}
+
+	rc = devm_request_irq(&spmi->dev, kp->key_sense_irq, qpnp_kp_irq,
+				 IRQF_TRIGGER_RISING, "qpnp-keypad-sense", kp);
+	if (rc < 0) {
+		dev_err(&spmi->dev, "failed to request keypad sense irq\n");
+		return rc;
+	}
+
+	rc = devm_request_irq(&spmi->dev, kp->key_stuck_irq, qpnp_kp_stuck_irq,
+				 IRQF_TRIGGER_RISING, "qpnp-keypad-stuck", kp);
+	if (rc < 0) {
+		dev_err(&spmi->dev, "failed to request keypad stuck irq\n");
+		return rc;
+	}
+
+	device_init_wakeup(&spmi->dev, kp->wakeup);
+
+	return rc;
+}
+
+static int __devexit qpnp_kp_remove(struct spmi_device *spmi)
+{
+	struct qpnp_kp *kp = dev_get_drvdata(&spmi->dev);
+
+	device_init_wakeup(&spmi->dev, 0);
+	input_unregister_device(kp->input);
+
+	return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int qpnp_kp_suspend(struct device *dev)
+{
+	struct qpnp_kp *kp = dev_get_drvdata(dev);
+	struct input_dev *input_dev = kp->input;
+
+	if (device_may_wakeup(dev)) {
+		enable_irq_wake(kp->key_sense_irq);
+	} else {
+		mutex_lock(&input_dev->mutex);
+
+		if (input_dev->users)
+			qpnp_kp_disable(kp);
+
+		mutex_unlock(&input_dev->mutex);
+	}
+
+	return 0;
+}
+
+static int qpnp_kp_resume(struct device *dev)
+{
+	struct qpnp_kp *kp = dev_get_drvdata(dev);
+	struct input_dev *input_dev = kp->input;
+
+	if (device_may_wakeup(dev)) {
+		disable_irq_wake(kp->key_sense_irq);
+	} else {
+		mutex_lock(&input_dev->mutex);
+
+		if (input_dev->users)
+			qpnp_kp_enable(kp);
+
+		mutex_unlock(&input_dev->mutex);
+	}
+
+	return 0;
+}
+#endif
+
+static SIMPLE_DEV_PM_OPS(qpnp_kp_pm_ops,
+			 qpnp_kp_suspend, qpnp_kp_resume);
+
+static struct of_device_id spmi_match_table[] = {
+	{	.compatible = "qcom,qpnp-keypad",
+	},
+	{}
+};
+
+static struct spmi_driver qpnp_kp_driver = {
+	.probe		= qpnp_kp_probe,
+	.remove		= __devexit_p(qpnp_kp_remove),
+	.driver		= {
+		.name = "qcom,qpnp-keypad",
+		.of_match_table = spmi_match_table,
+		.owner = THIS_MODULE,
+		.pm = &qpnp_kp_pm_ops,
+	},
+};
+
+static int __init qpnp_kp_init(void)
+{
+	return spmi_driver_register(&qpnp_kp_driver);
+}
+module_init(qpnp_kp_init);
+
+static void __exit qpnp_kp_exit(void)
+{
+	spmi_driver_unregister(&qpnp_kp_driver);
+}
+module_exit(qpnp_kp_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("QPNP keypad driver");
diff --git a/drivers/input/touchscreen/atmel_mxt_ts.c b/drivers/input/touchscreen/atmel_mxt_ts.c
index 0c20815..0ea230a 100644
--- a/drivers/input/touchscreen/atmel_mxt_ts.c
+++ b/drivers/input/touchscreen/atmel_mxt_ts.c
@@ -369,6 +369,7 @@
 	struct regulator *vcc_ana;
 	struct regulator *vcc_dig;
 	struct regulator *vcc_i2c;
+	struct mxt_address_pair addr_pair;
 #if defined(CONFIG_FB)
 	struct notifier_block fb_notif;
 #elif defined(CONFIG_HAS_EARLYSUSPEND)
@@ -490,9 +491,27 @@
 	dev_dbg(dev, "checksum:\t0x%x\n", message->checksum);
 }
 
-static int mxt_switch_to_bootloader_address(struct mxt_data *data)
+static int mxt_lookup_bootloader_address(struct mxt_data *data)
 {
 	int i;
+
+	for (i = 0; mxt_slave_addresses[i].application != 0;  i++) {
+		if (mxt_slave_addresses[i].application ==
+				data->client->addr) {
+			data->addr_pair.bootloader =
+				mxt_slave_addresses[i].bootloader;
+			return 0;
+		}
+	}
+
+	dev_err(&data->client->dev, "Address 0x%02x not found in address table",
+			data->client->addr);
+	return -EINVAL;
+}
+
+static int mxt_switch_to_bootloader_address(struct mxt_data *data)
+{
 	struct i2c_client *client = data->client;
 
 	if (data->state == BOOTLOADER) {
@@ -500,27 +519,16 @@
 		return -EINVAL;
 	}
 
-	for (i = 0; mxt_slave_addresses[i].application != 0;  i++) {
-		if (mxt_slave_addresses[i].application == client->addr) {
-			dev_info(&client->dev, "Changing to bootloader address: "
-				"%02x -> %02x",
-				client->addr,
-				mxt_slave_addresses[i].bootloader);
+	dev_info(&client->dev, "Changing to bootloader address: 0x%02x -> 0x%02x",
+			client->addr, data->addr_pair.bootloader);
 
-			client->addr = mxt_slave_addresses[i].bootloader;
-			data->state = BOOTLOADER;
-			return 0;
-		}
-	}
-
-	dev_err(&client->dev, "Address 0x%02x not found in address table",
-								client->addr);
-	return -EINVAL;
+	client->addr = data->addr_pair.bootloader;
+	data->state = BOOTLOADER;
+	return 0;
 }
 
 static int mxt_switch_to_appmode_address(struct mxt_data *data)
 {
-	int i;
 	struct i2c_client *client = data->client;
 
 	if (data->state == APPMODE) {
@@ -528,23 +536,13 @@
 		return -EINVAL;
 	}
 
-	for (i = 0; mxt_slave_addresses[i].application != 0;  i++) {
-		if (mxt_slave_addresses[i].bootloader == client->addr) {
-			dev_info(&client->dev,
-				"Changing to application mode address: "
-							"0x%02x -> 0x%02x",
-				client->addr,
-				mxt_slave_addresses[i].application);
+	dev_info(&client->dev, "Changing to application mode address: "
+			"0x%02x -> 0x%02x", client->addr,
+			data->addr_pair.application);
 
-			client->addr = mxt_slave_addresses[i].application;
-			data->state = APPMODE;
-			return 0;
-		}
-	}
-
-	dev_err(&client->dev, "Address 0x%02x not found in address table",
-								client->addr);
-	return -EINVAL;
+	client->addr = data->addr_pair.application;
+	data->state = APPMODE;
+	return 0;
 }
 
 static int mxt_get_bootloader_version(struct i2c_client *client, u8 val)
@@ -888,6 +886,17 @@
 	input_sync(input_dev);
 }
 
+static void mxt_release_all(struct mxt_data *data)
+{
+	int id;
+
+	for (id = 0; id < MXT_MAX_FINGER; id++)
+		if (data->finger[id].status)
+			data->finger[id].status = MXT_RELEASE;
+
+	mxt_input_report(data, 0);
+}
+
 static void mxt_input_touchevent(struct mxt_data *data,
 				      struct mxt_message *message, int id)
 {
@@ -899,6 +908,10 @@
 	int area;
 	int pressure;
 
+	if (status & MXT_SUPPRESS) {
+		mxt_release_all(data);
+		return;
+	}
 	/* Check the touch is present on the screen */
 	if (!(status & MXT_DETECT)) {
 		if (status & MXT_RELEASE) {
@@ -973,18 +986,7 @@
 	data->keyarray_old = data->keyarray_new;
 }
 
-static void mxt_release_all(struct mxt_data *data)
-{
-	int id;
-
-	for (id = 0; id < MXT_MAX_FINGER; id++)
-		if (data->finger[id].status)
-			data->finger[id].status = MXT_RELEASE;
-
-	mxt_input_report(data, 0);
-}
-
-static void mxt_handle_touch_supression(struct mxt_data *data, u8 status)
+static void mxt_handle_touch_suppression(struct mxt_data *data, u8 status)
 {
 	dev_dbg(&data->client->dev, "touch suppression\n");
 	/* release all touches */
@@ -1039,7 +1041,7 @@
 
 		id = reportid - data->t9_min_reportid;
 
-		 /* check whether report id is part of T9,T15 or T42*/
+		 /* check whether report id is part of T9, T15 or T42 */
 		if (reportid >= data->t9_min_reportid &&
 					reportid <= data->t9_max_reportid)
 			mxt_input_touchevent(data, &message, id);
@@ -1047,8 +1049,9 @@
 					reportid <= data->t15_max_reportid)
 			mxt_handle_key_array(data, &message);
 		else if (reportid >= data->t42_min_reportid &&
-					reportid <= data->t42_max_reportid)
-			mxt_handle_touch_supression(data, message.message[0]);
+				reportid <= data->t42_max_reportid)
+			mxt_handle_touch_suppression(data,
+					message.message[0]);
 		else
 			mxt_dump_message(dev, &message);
 	} while (reportid != 0xff);
@@ -1695,9 +1698,11 @@
 	switch (data->info.family_id) {
 	case MXT224_ID:
 	case MXT224E_ID:
+	case MXT336S_ID:
 		max_frame_size = MXT_SINGLE_FW_MAX_FRAME_SIZE;
 		break;
 	case MXT1386_ID:
+	case MXT1664S_ID:
 		max_frame_size = MXT_CHIPSET_FW_MAX_FRAME_SIZE;
 		break;
 	default:
@@ -2686,6 +2691,12 @@
 		return -ENOMEM;
 	}
 
+	rc = of_property_read_u32(np, "atmel,bl-addr", &temp_val);
+	if (rc && (rc != -EINVAL))
+		dev_err(dev, "Unable to read bootloader address\n");
+	else if (rc != -EINVAL)
+		pdata->bl_addr = (u8) temp_val;
+
 	pdata->config_array  = info;
 
 	for_each_child_of_node(np, temp) {
@@ -2724,12 +2735,11 @@
 		} else
 			info->build = (u8) temp_val;
 
-		info->bootldr_id = of_property_read_u32(temp,
+		rc = of_property_read_u32(temp,
 					"atmel,bootldr-id", &temp_val);
-		if (rc) {
+		if (rc && (rc != -EINVAL))
 			dev_err(dev, "Unable to read bootldr-id\n");
-			return rc;
-		} else
+		else if (rc != -EINVAL)
 			info->bootldr_id = (u8) temp_val;
 
 		rc = mxt_parse_config(dev, temp, info);
@@ -2937,6 +2947,13 @@
 
 	mxt_power_on_delay(data);
 
+	data->addr_pair.application = data->client->addr;
+
+	if (pdata->bl_addr)
+		data->addr_pair.bootloader = pdata->bl_addr;
+	else
+		mxt_lookup_bootloader_address(data);
+
 	error = mxt_initialize(data);
 	if (error)
 		goto err_reset_gpio_req;
diff --git a/drivers/input/touchscreen/synaptics_fw_update.c b/drivers/input/touchscreen/synaptics_fw_update.c
index abb5bbc..7452587 100644
--- a/drivers/input/touchscreen/synaptics_fw_update.c
+++ b/drivers/input/touchscreen/synaptics_fw_update.c
@@ -1714,5 +1714,4 @@
 
 MODULE_AUTHOR("Synaptics, Inc.");
 MODULE_DESCRIPTION("RMI4 FW Update Module");
-MODULE_LICENSE("GPL");
-MODULE_VERSION(SYNAPTICS_RMI4_DRIVER_VERSION_STRING);
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/input/touchscreen/synaptics_i2c_rmi4.c b/drivers/input/touchscreen/synaptics_i2c_rmi4.c
index c68f730..2c79276 100644
--- a/drivers/input/touchscreen/synaptics_i2c_rmi4.c
+++ b/drivers/input/touchscreen/synaptics_i2c_rmi4.c
@@ -5,6 +5,7 @@
  *
  * Copyright (C) 2012 Alexandra Chin <alexandra.chin@tw.synaptics.com>
  * Copyright (C) 2012 Scott Lin <scott.lin@tw.synaptics.com>
+ * Copyright (c) 2013, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
@@ -71,6 +72,19 @@
 #define NO_SLEEP_OFF (0 << 3)
 #define NO_SLEEP_ON (1 << 3)
 
+#define RMI4_VTG_MIN_UV		2700000
+#define RMI4_VTG_MAX_UV		3300000
+#define RMI4_ACTIVE_LOAD_UA	15000
+#define RMI4_LPM_LOAD_UA	10
+
+#define RMI4_I2C_VTG_MIN_UV	1800000
+#define RMI4_I2C_VTG_MAX_UV	1800000
+#define RMI4_I2C_LOAD_UA	10000
+#define RMI4_I2C_LPM_LOAD_UA	10
+
+#define RMI4_GPIO_SLEEP_LOW_US 10000
+#define RMI4_GPIO_WAIT_HIGH_MS 25
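+
+/*
+ * Reset sequencing used at probe: the reset GPIO is driven low for
+ * RMI4_GPIO_SLEEP_LOW_US and, after it is released, the controller is given
+ * RMI4_GPIO_WAIT_HIGH_MS to come up before it is queried.
+ */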
+
 static int synaptics_rmi4_i2c_read(struct synaptics_rmi4_data *rmi4_data,
 		unsigned short addr, unsigned char *data,
 		unsigned short length);
@@ -1507,10 +1521,6 @@
 			container_of(work, struct synaptics_rmi4_data,
 			det_work.work);
 
-	queue_delayed_work(rmi4_data->det_workqueue,
-			&rmi4_data->det_work,
-			msecs_to_jiffies(EXP_FN_DET_INTERVAL));
-
 	mutex_lock(&exp_fn_list_mutex);
 	if (!list_empty(&exp_fn_list)) {
 		list_for_each_entry_safe(exp_fhandler,
@@ -1594,6 +1604,164 @@
 }
 EXPORT_SYMBOL(synaptics_rmi4_new_function);
 
+
+static int reg_set_optimum_mode_check(struct regulator *reg, int load_uA)
+{
+	return (regulator_count_voltages(reg) > 0) ?
+		regulator_set_optimum_mode(reg, load_uA) : 0;
+}
+
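+/*
+ * Power handling is split in two: synaptics_rmi4_regulator_configure() only
+ * acquires the "vdd" and "vcc_i2c" supplies and sets their voltage windows,
+ * while synaptics_rmi4_power_on() votes for the load current and enables
+ * them.  Both take a bool so the same functions serve the teardown path
+ * (on == false) from remove() and from the probe error labels.
+ */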
+static int synaptics_rmi4_regulator_configure(struct synaptics_rmi4_data
+						*rmi4_data, bool on)
+{
+	int retval;
+
+	if (on == false)
+		goto hw_shutdown;
+
+	if (rmi4_data->board->regulator_en) {
+		rmi4_data->vdd = regulator_get(&rmi4_data->i2c_client->dev,
+						"vdd");
+		if (IS_ERR(rmi4_data->vdd)) {
+			dev_err(&rmi4_data->i2c_client->dev,
+					"%s: Failed to get vdd regulator\n",
+					__func__);
+			return PTR_ERR(rmi4_data->vdd);
+		}
+
+		if (regulator_count_voltages(rmi4_data->vdd) > 0) {
+			retval = regulator_set_voltage(rmi4_data->vdd,
+				RMI4_VTG_MIN_UV, RMI4_VTG_MAX_UV);
+			if (retval) {
+				dev_err(&rmi4_data->i2c_client->dev,
+					"regulator set_vtg failed retval=%d\n",
+					retval);
+				goto err_set_vtg_vdd;
+			}
+		}
+	}
+
+	if (rmi4_data->board->i2c_pull_up) {
+		rmi4_data->vcc_i2c = regulator_get(&rmi4_data->i2c_client->dev,
+						"vcc_i2c");
+		if (IS_ERR(rmi4_data->vcc_i2c)) {
+			dev_err(&rmi4_data->i2c_client->dev,
+					"%s: Failed to get i2c regulator\n",
+					__func__);
+			retval = PTR_ERR(rmi4_data->vcc_i2c);
+			goto err_get_vtg_i2c;
+		}
+
+		if (regulator_count_voltages(rmi4_data->vcc_i2c) > 0) {
+			retval = regulator_set_voltage(rmi4_data->vcc_i2c,
+				RMI4_I2C_VTG_MIN_UV, RMI4_I2C_VTG_MAX_UV);
+			if (retval) {
+				dev_err(&rmi4_data->i2c_client->dev,
+					"reg set i2c vtg failed retval=%d\n",
+					retval);
+				goto err_set_vtg_i2c;
+			}
+		}
+	}
+
+	return 0;
+
+err_set_vtg_i2c:
+	if (rmi4_data->board->i2c_pull_up)
+		regulator_put(rmi4_data->vcc_i2c);
+err_get_vtg_i2c:
+	if (rmi4_data->board->regulator_en)
+		if (regulator_count_voltages(rmi4_data->vdd) > 0)
+			regulator_set_voltage(rmi4_data->vdd, 0,
+				RMI4_VTG_MAX_UV);
+err_set_vtg_vdd:
+	if (rmi4_data->board->regulator_en)
+		regulator_put(rmi4_data->vdd);
+	return retval;
+
+hw_shutdown:
+	if (rmi4_data->board->regulator_en) {
+		if (regulator_count_voltages(rmi4_data->vdd) > 0)
+			regulator_set_voltage(rmi4_data->vdd, 0,
+				RMI4_VTG_MAX_UV);
+		regulator_put(rmi4_data->vdd);
+	}
+	if (rmi4_data->board->i2c_pull_up) {
+		if (regulator_count_voltages(rmi4_data->vcc_i2c) > 0)
+			regulator_set_voltage(rmi4_data->vcc_i2c, 0,
+					RMI4_I2C_VTG_MAX_UV);
+		regulator_put(rmi4_data->vcc_i2c);
+	}
+	return 0;
+}
+
+static int synaptics_rmi4_power_on(struct synaptics_rmi4_data *rmi4_data,
+					bool on)
+{
+	int retval;
+
+	if (on == false)
+		goto power_off;
+
+	if (rmi4_data->board->regulator_en) {
+		retval = reg_set_optimum_mode_check(rmi4_data->vdd,
+			RMI4_ACTIVE_LOAD_UA);
+		if (retval < 0) {
+			dev_err(&rmi4_data->i2c_client->dev,
+				"Regulator vdd set_opt failed rc=%d\n",
+				retval);
+			return retval;
+		}
+
+		retval = regulator_enable(rmi4_data->vdd);
+		if (retval) {
+			dev_err(&rmi4_data->i2c_client->dev,
+				"Regulator vdd enable failed rc=%d\n",
+				retval);
+			goto error_reg_en_vdd;
+		}
+	}
+
+	if (rmi4_data->board->i2c_pull_up) {
+		retval = reg_set_optimum_mode_check(rmi4_data->vcc_i2c,
+			RMI4_I2C_LOAD_UA);
+		if (retval < 0) {
+			dev_err(&rmi4_data->i2c_client->dev,
+				"Regulator vcc_i2c set_opt failed rc=%d\n",
+				retval);
+			goto error_reg_opt_i2c;
+		}
+
+		retval = regulator_enable(rmi4_data->vcc_i2c);
+		if (retval) {
+			dev_err(&rmi4_data->i2c_client->dev,
+				"Regulator vcc_i2c enable failed rc=%d\n",
+				retval);
+			goto error_reg_en_vcc_i2c;
+		}
+	}
+	return 0;
+
+error_reg_en_vcc_i2c:
+	if (rmi4_data->board->i2c_pull_up)
+		reg_set_optimum_mode_check(rmi4_data->vdd, 0);
+error_reg_opt_i2c:
+	if (rmi4_data->board->regulator_en)
+		regulator_disable(rmi4_data->vdd);
+error_reg_en_vdd:
+	if (rmi4_data->board->regulator_en)
+		reg_set_optimum_mode_check(rmi4_data->vdd, 0);
+	return retval;
+
+power_off:
+	if (rmi4_data->board->regulator_en) {
+		reg_set_optimum_mode_check(rmi4_data->vdd, 0);
+		regulator_disable(rmi4_data->vdd);
+	}
+	if (rmi4_data->board->i2c_pull_up) {
+		reg_set_optimum_mode_check(rmi4_data->vcc_i2c, 0);
+		regulator_disable(rmi4_data->vcc_i2c);
+	}
+	return 0;
+}
+
  /**
  * synaptics_rmi4_probe()
  *
@@ -1611,7 +1779,7 @@
 static int __devinit synaptics_rmi4_probe(struct i2c_client *client,
 		const struct i2c_device_id *dev_id)
 {
-	int retval;
+	int retval = 0;
 	unsigned char ii;
 	unsigned char attr_count;
 	struct synaptics_rmi4_f1a_handle *f1a;
@@ -1655,18 +1823,6 @@
 		goto err_input_device;
 	}
 
-	if (platform_data->regulator_en) {
-		rmi4_data->regulator = regulator_get(&client->dev, "vdd");
-		if (IS_ERR(rmi4_data->regulator)) {
-			dev_err(&client->dev,
-					"%s: Failed to get regulator\n",
-					__func__);
-			retval = PTR_ERR(rmi4_data->regulator);
-			goto err_regulator;
-		}
-		regulator_enable(rmi4_data->regulator);
-	}
-
 	rmi4_data->i2c_client = client;
 	rmi4_data->current_page = MASK_8BIT;
 	rmi4_data->board = platform_data;
@@ -1679,24 +1835,11 @@
 	rmi4_data->irq_enable = synaptics_rmi4_irq_enable;
 	rmi4_data->reset_device = synaptics_rmi4_reset_device;
 
-	init_waitqueue_head(&rmi4_data->wait);
-	mutex_init(&(rmi4_data->rmi4_io_ctrl_mutex));
-
-	retval = synaptics_rmi4_query_device(rmi4_data);
-	if (retval < 0) {
-		dev_err(&client->dev,
-				"%s: Failed to query device\n",
-				__func__);
-		goto err_query_device;
-	}
-
-	i2c_set_clientdata(client, rmi4_data);
-
 	rmi4_data->input_dev->name = DRIVER_NAME;
 	rmi4_data->input_dev->phys = INPUT_PHYS_NAME;
 	rmi4_data->input_dev->id.bustype = BUS_I2C;
-	rmi4_data->input_dev->id.product = SYNAPTICS_RMI4_DRIVER_PRODUCT;
-	rmi4_data->input_dev->id.version = SYNAPTICS_RMI4_DRIVER_VERSION;
+	rmi4_data->input_dev->id.product = SYNAPTICS_DSX_DRIVER_PRODUCT;
+	rmi4_data->input_dev->id.version = SYNAPTICS_DSX_DRIVER_VERSION;
 	rmi4_data->input_dev->dev.parent = &client->dev;
 	input_set_drvdata(rmi4_data->input_dev, rmi4_data);
 
@@ -1727,6 +1870,76 @@
 			rmi4_data->num_of_fingers);
 #endif
 
+	retval = synaptics_rmi4_regulator_configure(rmi4_data, true);
+	if (retval < 0) {
+		dev_err(&client->dev, "Failed to configure regulators\n");
+		goto err_reg_configure;
+	}
+
+	retval = synaptics_rmi4_power_on(rmi4_data, true);
+	if (retval < 0) {
+		dev_err(&client->dev, "Failed to power on\n");
+		goto err_power_device;
+	}
+
+	if (gpio_is_valid(platform_data->irq_gpio)) {
+		/* configure touchscreen irq gpio */
+		retval = gpio_request(platform_data->irq_gpio, "rmi4_irq_gpio");
+		if (retval) {
+			dev_err(&client->dev, "unable to request gpio [%d]\n",
+						platform_data->irq_gpio);
+			goto err_query_device;
+		}
+		retval = gpio_direction_input(platform_data->irq_gpio);
+		if (retval) {
+			dev_err(&client->dev,
+				"unable to set direction for gpio [%d]\n",
+				platform_data->irq_gpio);
+			goto err_irq_gpio_req;
+		}
+	} else {
+		dev_err(&client->dev, "irq gpio not provided\n");
+		goto err_query_device;
+	}
+
+	if (gpio_is_valid(platform_data->reset_gpio)) {
+		/* configure touchscreen reset out gpio */
+		retval = gpio_request(platform_data->reset_gpio,
+				"rmi4_reset_gpio");
+		if (retval) {
+			dev_err(&client->dev, "unable to request gpio [%d]\n",
+						platform_data->reset_gpio);
+			goto err_irq_gpio_req;
+		}
+
+		retval = gpio_direction_output(platform_data->reset_gpio, 1);
+		if (retval) {
+			dev_err(&client->dev,
+				"unable to set direction for gpio [%d]\n",
+				platform_data->reset_gpio);
+			goto err_reset_gpio_req;
+		}
+
+		gpio_set_value(platform_data->reset_gpio, 0);
+		usleep(RMI4_GPIO_SLEEP_LOW_US);
+		gpio_set_value(platform_data->reset_gpio, 1);
+		msleep(RMI4_GPIO_WAIT_HIGH_MS);
+	}
+
+
+	init_waitqueue_head(&rmi4_data->wait);
+	mutex_init(&(rmi4_data->rmi4_io_ctrl_mutex));
+
+	retval = synaptics_rmi4_query_device(rmi4_data);
+	if (retval < 0) {
+		dev_err(&client->dev,
+				"%s: Failed to query device\n",
+				__func__);
+		goto err_reset_gpio_req;
+	}
+
+	i2c_set_clientdata(client, rmi4_data);
+
 	f1a = NULL;
 	if (!list_empty(&rmi->support_fn_list)) {
 		list_for_each_entry(fhandler, &rmi->support_fn_list, link) {
@@ -1806,12 +2019,6 @@
 	input_unregister_device(rmi4_data->input_dev);
 
 err_register_input:
-err_query_device:
-	if (platform_data->regulator_en) {
-		regulator_disable(rmi4_data->regulator);
-		regulator_put(rmi4_data->regulator);
-	}
-
 	if (!list_empty(&rmi->support_fn_list)) {
 		list_for_each_entry(fhandler, &rmi->support_fn_list, link) {
 			if (fhandler->fn_number == SYNAPTICS_RMI4_F1A)
@@ -1821,11 +2028,19 @@
 			kfree(fhandler);
 		}
 	}
-
-err_regulator:
+err_reset_gpio_req:
+	if (gpio_is_valid(platform_data->reset_gpio))
+		gpio_free(platform_data->reset_gpio);
+err_irq_gpio_req:
+	if (gpio_is_valid(platform_data->irq_gpio))
+		gpio_free(platform_data->irq_gpio);
+err_query_device:
+	synaptics_rmi4_power_on(rmi4_data, false);
+err_power_device:
+	synaptics_rmi4_regulator_configure(rmi4_data, false);
+err_reg_configure:
 	input_free_device(rmi4_data->input_dev);
 	rmi4_data->input_dev = NULL;
-
 err_input_device:
 	kfree(rmi4_data);
 
@@ -1848,8 +2063,6 @@
 	struct synaptics_rmi4_fn *fhandler;
 	struct synaptics_rmi4_data *rmi4_data = i2c_get_clientdata(client);
 	struct synaptics_rmi4_device_info *rmi;
-	const struct synaptics_rmi4_platform_data *platform_data =
-			rmi4_data->board;
 
 	rmi = &(rmi4_data->rmi4_mod_info);
 
@@ -1869,11 +2082,6 @@
 
 	input_unregister_device(rmi4_data->input_dev);
 
-	if (platform_data->regulator_en) {
-		regulator_disable(rmi4_data->regulator);
-		regulator_put(rmi4_data->regulator);
-	}
-
 	if (!list_empty(&rmi->support_fn_list)) {
 		list_for_each_entry(fhandler, &rmi->support_fn_list, link) {
 			if (fhandler->fn_number == SYNAPTICS_RMI4_F1A)
@@ -1883,7 +2091,14 @@
 			kfree(fhandler);
 		}
 	}
-	input_free_device(rmi4_data->input_dev);
+
+	if (gpio_is_valid(rmi4_data->board->reset_gpio))
+		gpio_free(rmi4_data->board->reset_gpio);
+	if (gpio_is_valid(rmi4_data->board->irq_gpio))
+		gpio_free(rmi4_data->board->irq_gpio);
+
+	synaptics_rmi4_power_on(rmi4_data, false);
+	synaptics_rmi4_regulator_configure(rmi4_data, false);
 
 	kfree(rmi4_data);
 
@@ -2047,8 +2262,6 @@
 static int synaptics_rmi4_suspend(struct device *dev)
 {
 	struct synaptics_rmi4_data *rmi4_data = dev_get_drvdata(dev);
-	const struct synaptics_rmi4_platform_data *platform_data =
-			rmi4_data->board;
 
 	if (!rmi4_data->sensor_sleep) {
 		rmi4_data->touch_stopped = true;
@@ -2057,9 +2270,6 @@
 		synaptics_rmi4_sensor_sleep(rmi4_data);
 	}
 
-	if (platform_data->regulator_en)
-		regulator_disable(rmi4_data->regulator);
-
 	return 0;
 }
 
@@ -2076,11 +2286,6 @@
 static int synaptics_rmi4_resume(struct device *dev)
 {
 	struct synaptics_rmi4_data *rmi4_data = dev_get_drvdata(dev);
-	const struct synaptics_rmi4_platform_data *platform_data =
-			rmi4_data->board;
-
-	if (platform_data->regulator_en)
-		regulator_enable(rmi4_data->regulator);
 
 	synaptics_rmi4_sensor_wake(rmi4_data);
 	rmi4_data->touch_stopped = false;
@@ -2147,4 +2352,3 @@
 MODULE_AUTHOR("Synaptics, Inc.");
 MODULE_DESCRIPTION("Synaptics RMI4 I2C Touch Driver");
 MODULE_LICENSE("GPL v2");
-MODULE_VERSION(SYNAPTICS_RMI4_DRIVER_VERSION_STRING);
diff --git a/drivers/input/touchscreen/synaptics_i2c_rmi4.h b/drivers/input/touchscreen/synaptics_i2c_rmi4.h
index eb8d5f2..d13f172 100644
--- a/drivers/input/touchscreen/synaptics_i2c_rmi4.h
+++ b/drivers/input/touchscreen/synaptics_i2c_rmi4.h
@@ -5,6 +5,7 @@
  *
  * Copyright (C) 2012 Alexandra Chin <alexandra.chin@tw.synaptics.com>
  * Copyright (C) 2012 Scott Lin <scott.lin@tw.synaptics.com>
+ * Copyright (c) 2013, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
@@ -20,11 +21,10 @@
 #ifndef _SYNAPTICS_DSX_RMI4_H_
 #define _SYNAPTICS_DSX_RMI4_H_
 
-#define SYNAPTICS_RMI4_DS4 0x0001
-#define SYNAPTICS_RMI4_DS5 0x0002
-#define SYNAPTICS_RMI4_DRIVER_PRODUCT SYNAPTICS_RMI4_DS4
-#define SYNAPTICS_RMI4_DRIVER_VERSION 0x1001
-#define SYNAPTICS_RMI4_DRIVER_VERSION_STRING "0x1001"
+#define SYNAPTICS_DS4 (1 << 0)
+#define SYNAPTICS_DS5 (1 << 1)
+#define SYNAPTICS_DSX_DRIVER_PRODUCT SYNAPTICS_DS4
+#define SYNAPTICS_DSX_DRIVER_VERSION 0x1002
 
 #include <linux/version.h>
 #ifdef CONFIG_HAS_EARLYSUSPEND
@@ -188,7 +188,8 @@
 	struct input_dev *input_dev;
 	const struct synaptics_rmi4_platform_data *board;
 	struct synaptics_rmi4_device_info rmi4_mod_info;
-	struct regulator *regulator;
+	struct regulator *vdd;
+	struct regulator *vcc_i2c;
 	struct mutex rmi4_io_ctrl_mutex;
 	struct delayed_work det_work;
 	struct workqueue_struct *det_workqueue;
diff --git a/drivers/input/touchscreen/synaptics_rmi_dev.c b/drivers/input/touchscreen/synaptics_rmi_dev.c
index fbb6f5e..7f1aac5 100644
--- a/drivers/input/touchscreen/synaptics_rmi_dev.c
+++ b/drivers/input/touchscreen/synaptics_rmi_dev.c
@@ -706,5 +706,4 @@
 
 MODULE_AUTHOR("Synaptics, Inc.");
 MODULE_DESCRIPTION("RMI4 RMI_Dev Module");
-MODULE_LICENSE("GPL");
-MODULE_VERSION(SYNAPTICS_RMI4_DRIVER_VERSION_STRING);
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/iommu/Kconfig b/drivers/iommu/Kconfig
index b126aa2..db4ec9d 100644
--- a/drivers/iommu/Kconfig
+++ b/drivers/iommu/Kconfig
@@ -38,7 +38,7 @@
 
 config MSM_IOMMU_PMON
 	bool "MSM IOMMU Perfomance Monitoring Support"
-	depends on ARCH_MSM8974 && MSM_IOMMU
+	depends on (ARCH_MSM8974 || ARCH_MSM8610 || ARCH_MSM8226) && MSM_IOMMU
 	help
 	  Support for monitoring IOMMUs performance on certain Qualcomm SOCs.
 	  It captures TLB statistics per context bank of the IOMMU as an
diff --git a/drivers/iommu/Makefile b/drivers/iommu/Makefile
index 112b62b..096b53e 100644
--- a/drivers/iommu/Makefile
+++ b/drivers/iommu/Makefile
@@ -3,7 +3,7 @@
 ifdef CONFIG_OF
 obj-$(CONFIG_MSM_IOMMU) += msm_iommu-v1.o msm_iommu_dev-v1.o msm_iommu_pagetable.o msm_iommu_sec.o
 endif
-obj-$(CONFIG_MSM_IOMMU_PMON) += msm_iommu_perfmon.o
+obj-$(CONFIG_MSM_IOMMU_PMON) += msm_iommu_perfmon.o msm_iommu_perfmon-v0.o msm_iommu_perfmon-v1.o
 obj-$(CONFIG_AMD_IOMMU) += amd_iommu.o amd_iommu_init.o
 obj-$(CONFIG_AMD_IOMMU_V2) += amd_iommu_v2.o
 obj-$(CONFIG_DMAR_TABLE) += dmar.o
diff --git a/drivers/iommu/msm_iommu-v0.c b/drivers/iommu/msm_iommu-v0.c
index 28f1516..6bf0220 100644
--- a/drivers/iommu/msm_iommu-v0.c
+++ b/drivers/iommu/msm_iommu-v0.c
@@ -27,6 +27,7 @@
 #include <asm/cacheflush.h>
 #include <asm/sizes.h>
 
+#include <mach/iommu_perfmon.h>
 #include <mach/iommu_hw-v0.h>
 #include <mach/iommu.h>
 #include <mach/msm_smsm.h>
@@ -160,6 +161,41 @@
 	clk_disable_unprepare(drvdata->pclk);
 }
 
+static int _iommu_power_on(void *data)
+{
+	struct msm_iommu_drvdata *drvdata;
+
+	drvdata = (struct msm_iommu_drvdata *)data;
+	return __enable_clocks(drvdata);
+}
+
+static int _iommu_power_off(void *data)
+{
+	struct msm_iommu_drvdata *drvdata;
+
+	drvdata = (struct msm_iommu_drvdata *)data;
+	__disable_clocks(drvdata);
+	return 0;
+}
+
+static void _iommu_lock_acquire(void)
+{
+	msm_iommu_lock();
+}
+
+static void _iommu_lock_release(void)
+{
+	msm_iommu_unlock();
+}
+
+struct iommu_access_ops iommu_access_ops_v0 = {
+	.iommu_power_on = _iommu_power_on,
+	.iommu_power_off = _iommu_power_off,
+	.iommu_lock_acquire = _iommu_lock_acquire,
+	.iommu_lock_release = _iommu_lock_release,
+};
+EXPORT_SYMBOL(iommu_access_ops_v0);
+
 static int __flush_iotlb_va(struct iommu_domain *domain, unsigned int va)
 {
 	struct msm_priv *priv = domain->priv;
@@ -468,6 +504,11 @@
 	list_add(&(ctx_drvdata->attached_elm), &priv->list_attached);
 
 	ctx_drvdata->attached_domain = domain;
+
+	mutex_unlock(&msm_iommu_lock);
+
+	msm_iommu_attached(dev->parent);
+	return ret;
 unlock:
 	mutex_unlock(&msm_iommu_lock);
 	return ret;
@@ -481,6 +522,8 @@
 	struct msm_iommu_ctx_drvdata *ctx_drvdata;
 	int ret;
 
+	msm_iommu_detached(dev->parent);
+
 	mutex_lock(&msm_iommu_lock);
 	priv = domain->priv;
 
diff --git a/drivers/iommu/msm_iommu-v1.c b/drivers/iommu/msm_iommu-v1.c
index d15dc65..15a81ed 100644
--- a/drivers/iommu/msm_iommu-v1.c
+++ b/drivers/iommu/msm_iommu-v1.c
@@ -147,12 +147,13 @@
 	mutex_unlock(&msm_iommu_lock);
 }
 
-struct iommu_access_ops iommu_access_ops = {
+struct iommu_access_ops iommu_access_ops_v1 = {
 	.iommu_power_on = _iommu_power_on,
 	.iommu_power_off = _iommu_power_off,
 	.iommu_lock_acquire = _iommu_lock_acquire,
 	.iommu_lock_release = _iommu_lock_release,
 };
+EXPORT_SYMBOL(iommu_access_ops_v1);
 
 void iommu_halt(const struct msm_iommu_drvdata *iommu_drvdata)
 {
diff --git a/drivers/iommu/msm_iommu_dev-v0.c b/drivers/iommu/msm_iommu_dev-v0.c
index 3a9cc23..549800f 100644
--- a/drivers/iommu/msm_iommu_dev-v0.c
+++ b/drivers/iommu/msm_iommu_dev-v0.c
@@ -27,6 +27,7 @@
 #include <linux/of_address.h>
 #include <linux/of_device.h>
 
+#include <mach/iommu_perfmon.h>
 #include <mach/iommu_hw-v0.h>
 #include <mach/iommu.h>
 
@@ -134,6 +135,7 @@
 	struct device_node *child;
 	struct resource *r;
 	u32 glb_offset = 0;
+	int ret;
 
 	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 	if (!r) {
@@ -142,8 +144,7 @@
 	}
 	drvdata->base = devm_ioremap(&pdev->dev, r->start, resource_size(r));
 	if (!drvdata->base) {
-		pr_err("%s: Unable to ioremap address %x size %x\n", __func__,
-			r->start, resource_size(r));
+		pr_err("%s: Unable to ioremap %pr\n", __func__, r);
 		return -ENOMEM;
 	}
 	drvdata->glb_base = drvdata->base;
@@ -162,7 +163,12 @@
 			pr_err("Failed to create %s device\n", child->name);
 	}
 
-	drvdata->name = dev_name(&pdev->dev);
+	ret = of_property_read_string(pdev->dev.of_node, "label",
+			&drvdata->name);
+	if (ret) {
+		pr_err("%s: Missing property label\n", __func__);
+		return -EINVAL;
+	}
 	drvdata->sec_id = -1;
 	drvdata->ttbr_split = 0;
 #endif
@@ -255,8 +261,68 @@
 	return ret;
 }
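+/*
+ * The performance monitor description is read from the IOMMU's device tree
+ * node by msm_iommu_pmon_parse_dt() below.  A node would carry something
+ * along these lines (values illustrative):
+ *
+ *	qcom,iommu-pmu-ngroups = <1>;
+ *	qcom,iommu-pmu-ncounters = <4>;
+ *	qcom,iommu-pmu-event-classes = <0x08 0x09 0x10 0x12 0x80>;
+ *
+ * together with an event interrupt as the platform device's first IRQ; if
+ * no such IRQ is present, the monitor is treated as unavailable.
+ */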
 
+static int msm_iommu_pmon_parse_dt(struct platform_device *pdev,
+					struct iommu_pmon *pmon_info)
+{
+	int ret = 0;
+	int irq = platform_get_irq(pdev, 0);
+	unsigned int cls_prop_size;
+
+	if (irq > 0) {
+		pmon_info->iommu.evt_irq = platform_get_irq(pdev, 0);
+
+		ret = of_property_read_u32(pdev->dev.of_node,
+					   "qcom,iommu-pmu-ngroups",
+					   &pmon_info->num_groups);
+		if (ret) {
+			pr_err("Error reading qcom,iommu-pmu-ngroups\n");
+			goto fail;
+		}
+		ret = of_property_read_u32(pdev->dev.of_node,
+					   "qcom,iommu-pmu-ncounters",
+					   &pmon_info->num_counters);
+		if (ret) {
+			pr_err("Error reading qcom,iommu-pmu-ncounters\n");
+			goto fail;
+		}
+
+		if (!of_get_property(pdev->dev.of_node,
+				     "qcom,iommu-pmu-event-classes",
+				     &cls_prop_size)) {
+			pr_err("Error reading qcom,iommu-pmu-event-classes\n");
+			return -EINVAL;
+		}
+
+		pmon_info->event_cls_supported =
+			   devm_kzalloc(&pdev->dev, cls_prop_size, GFP_KERNEL);
+
+		if (!pmon_info->event_cls_supported) {
+			pr_err("Unable to get memory for event class array\n");
+			return -ENOMEM;
+		}
+
+		pmon_info->nevent_cls_supported = cls_prop_size / sizeof(u32);
+
+		ret = of_property_read_u32_array(pdev->dev.of_node,
+					"qcom,iommu-pmu-event-classes",
+					pmon_info->event_cls_supported,
+					pmon_info->nevent_cls_supported);
+		if (ret) {
+			pr_err("Error reading qcom,iommu-pmu-event-classes\n");
+			return ret;
+		}
+	} else {
+		pmon_info->iommu.evt_irq = -1;
+		ret = irq;
+	}
+
+fail:
+	return ret;
+}
+
 static int msm_iommu_probe(struct platform_device *pdev)
 {
+	struct iommu_pmon *pmon_info;
 	struct msm_iommu_drvdata *drvdata;
 	struct msm_iommu_dev *iommu_dev = pdev->dev.platform_data;
 	int ret;
@@ -288,8 +354,7 @@
 
 		r2 = request_mem_region(r->start, len, r->name);
 		if (!r2) {
-			pr_err("Could not request memory region: start=%p, len=%d\n",
-							(void *) r->start, len);
+			pr_err("Could not request memory region: %pr\n", r);
 			ret = -EBUSY;
 			goto fail;
 		}
@@ -297,8 +362,7 @@
 		drvdata->base = devm_ioremap(&pdev->dev, r2->start, len);
 
 		if (!drvdata->base) {
-			pr_err("Could not ioremap: start=%p, len=%d\n",
-				 (void *) r2->start, len);
+			pr_err("Could not ioremap: %pr\n", r);
 			ret = -EBUSY;
 			goto fail;
 		}
@@ -339,6 +403,29 @@
 
 	__disable_clocks(drvdata);
 
+	pmon_info = msm_iommu_pm_alloc(&pdev->dev);
+	if (pmon_info != NULL) {
+		ret = msm_iommu_pmon_parse_dt(pdev, pmon_info);
+		if (ret) {
+			msm_iommu_pm_free(&pdev->dev);
+			pr_info("%s: pmon not available.\n", drvdata->name);
+		} else {
+			pmon_info->iommu.base = drvdata->base;
+			pmon_info->iommu.ops = &iommu_access_ops_v0;
+			pmon_info->iommu.hw_ops = iommu_pm_get_hw_ops_v0();
+			pmon_info->iommu.iommu_name = drvdata->name;
+			ret = msm_iommu_pm_iommu_register(pmon_info);
+			if (ret) {
+				pr_err("%s iommu register fail\n",
+								drvdata->name);
+				msm_iommu_pm_free(&pdev->dev);
+			} else {
+				pr_debug("%s iommu registered for pmon\n",
+						pmon_info->iommu.iommu_name);
+			}
+		}
+	}
+
 	return 0;
 
 fail_clk:
@@ -376,7 +463,7 @@
 		ret = request_threaded_irq(irq, NULL,
 				msm_iommu_fault_handler,
 				IRQF_ONESHOT | IRQF_SHARED,
-				"msm_iommu_nonsecure_irq", pdev);
+				"msm_iommu_nonsecure_irq", ctx_drvdata);
 		if (ret) {
 			pr_err("Request IRQ %d failed with ret=%d\n", irq, ret);
 			return ret;
diff --git a/drivers/iommu/msm_iommu_dev-v1.c b/drivers/iommu/msm_iommu_dev-v1.c
index 02fd133..3f9f1c4 100644
--- a/drivers/iommu/msm_iommu_dev-v1.c
+++ b/drivers/iommu/msm_iommu_dev-v1.c
@@ -34,7 +34,7 @@
 {
 	struct msm_iommu_bfb_settings *bfb_settings;
 	u32 nreg, nval;
-	int ret, i;
+	int ret;
 
 	/*
 	 * It is not valid for a device to have the qcom,iommu-bfb-regs
@@ -80,11 +80,6 @@
 
 	bfb_settings->length = nreg / sizeof(*bfb_settings->regs);
 
-	for (i = 0; i < bfb_settings->length; i++)
-		if (bfb_settings->regs[i] < IMPLDEF_OFFSET ||
-		    bfb_settings->regs[i] >= IMPLDEF_OFFSET + IMPLDEF_LENGTH)
-			return -EINVAL;
-
 	drvdata->bfb_settings = bfb_settings;
 	return 0;
 }
@@ -123,8 +118,8 @@
 		drvdata->clk_reg_virt = devm_ioremap(&pdev->dev, r->start,
 						     resource_size(r));
 		if (!drvdata->clk_reg_virt) {
-			pr_err("Failed to map 0x%x for iommu clk\n",
-				r->start);
+			pr_err("Failed to map resource for iommu clk: %pr\n",
+				r);
 			ret = -ENOMEM;
 			goto fail;
 		}
@@ -269,7 +264,8 @@
 			pr_info("%s: pmon not available.\n", drvdata->name);
 		} else {
 			pmon_info->iommu.base = drvdata->base;
-			pmon_info->iommu.ops = &iommu_access_ops;
+			pmon_info->iommu.ops = &iommu_access_ops_v1;
+			pmon_info->iommu.hw_ops = iommu_pm_get_hw_ops_v1();
 			pmon_info->iommu.iommu_name = drvdata->name;
 			ret = msm_iommu_pm_iommu_register(pmon_info);
 			if (ret) {
diff --git a/drivers/iommu/msm_iommu_pagetable.c b/drivers/iommu/msm_iommu_pagetable.c
index 2ee9ba6..99841cd 100644
--- a/drivers/iommu/msm_iommu_pagetable.c
+++ b/drivers/iommu/msm_iommu_pagetable.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012 The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -110,6 +110,90 @@
 	return pgprot;
 }
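+/*
+ * The helpers below build ARM short-descriptor (V7S-style) page table
+ * entries.  The first level maps 4GB in 1MB sections; a 16MB supersection
+ * or a 64KB large page is encoded by writing 16 identical consecutive
+ * entries at the first or second level respectively, which is why fl_16m()
+ * and sl_64k() loop 16 times.  make_second_level() hangs a freshly zeroed
+ * second-level table (NUM_SL_PTE entries covering 1MB) off a first-level
+ * slot so that 4KB and 64KB mappings can be installed there.
+ */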
 
+static unsigned long *make_second_level(struct iommu_pt *pt,
+					unsigned long *fl_pte)
+{
+	unsigned long *sl;
+	sl = (unsigned long *) __get_free_pages(GFP_KERNEL,
+			get_order(SZ_4K));
+
+	if (!sl) {
+		pr_debug("Could not allocate second level table\n");
+		goto fail;
+	}
+	memset(sl, 0, SZ_4K);
+	clean_pte(sl, sl + NUM_SL_PTE, pt->redirect);
+
+	*fl_pte = ((((int)__pa(sl)) & FL_BASE_MASK) | \
+			FL_TYPE_TABLE);
+
+	clean_pte(fl_pte, fl_pte + 1, pt->redirect);
+fail:
+	return sl;
+}
+
+static int sl_4k(unsigned long *sl_pte, phys_addr_t pa, unsigned int pgprot)
+{
+	int ret = 0;
+
+	if (*sl_pte) {
+		ret = -EBUSY;
+		goto fail;
+	}
+
+	*sl_pte = (pa & SL_BASE_MASK_SMALL) | SL_NG | SL_SHARED
+		| SL_TYPE_SMALL | pgprot;
+fail:
+	return ret;
+}
+
+static int sl_64k(unsigned long *sl_pte, phys_addr_t pa, unsigned int pgprot)
+{
+	int ret = 0;
+
+	int i;
+
+	for (i = 0; i < 16; i++)
+		if (*(sl_pte+i)) {
+			ret = -EBUSY;
+			goto fail;
+		}
+
+	for (i = 0; i < 16; i++)
+		*(sl_pte+i) = (pa & SL_BASE_MASK_LARGE) | SL_NG
+				| SL_SHARED | SL_TYPE_LARGE | pgprot;
+
+fail:
+	return ret;
+}
+
+static inline int fl_1m(unsigned long *fl_pte, phys_addr_t pa, int pgprot)
+{
+	if (*fl_pte)
+		return -EBUSY;
+
+	*fl_pte = (pa & 0xFFF00000) | FL_NG | FL_TYPE_SECT | FL_SHARED
+		| pgprot;
+
+	return 0;
+}
+
+static inline int fl_16m(unsigned long *fl_pte, phys_addr_t pa, int pgprot)
+{
+	int i;
+	int ret = 0;
+	for (i = 0; i < 16; i++)
+		if (*(fl_pte+i)) {
+			ret = -EBUSY;
+			goto fail;
+		}
+	for (i = 0; i < 16; i++)
+		*(fl_pte+i) = (pa & 0xFF000000) | FL_SUPERSECTION
+			| FL_TYPE_SECT | FL_SHARED | FL_NG | pgprot;
+fail:
+	return ret;
+}
+
 int msm_iommu_pagetable_map(struct iommu_pt *pt, unsigned long va,
 			phys_addr_t pa, size_t len, int prot)
 {
@@ -144,28 +228,16 @@
 	fl_pte = pt->fl_table + fl_offset;	/* int pointers, 4 bytes */
 
 	if (len == SZ_16M) {
-		int i = 0;
-
-		for (i = 0; i < 16; i++)
-			if (*(fl_pte+i)) {
-				ret = -EBUSY;
-				goto fail;
-			}
-
-		for (i = 0; i < 16; i++)
-			*(fl_pte+i) = (pa & 0xFF000000) | FL_SUPERSECTION |
-				  FL_TYPE_SECT | FL_SHARED | FL_NG | pgprot;
+		ret = fl_16m(fl_pte, pa, pgprot);
+		if (ret)
+			goto fail;
 		clean_pte(fl_pte, fl_pte + 16, pt->redirect);
 	}
 
 	if (len == SZ_1M) {
-		if (*fl_pte) {
-			ret = -EBUSY;
+		ret = fl_1m(fl_pte, pa, pgprot);
+		if (ret)
 			goto fail;
-		}
-
-		*fl_pte = (pa & 0xFFF00000) | FL_NG | FL_TYPE_SECT
-					| FL_SHARED | pgprot;
 		clean_pte(fl_pte, fl_pte + 1, pt->redirect);
 	}
 
@@ -173,21 +245,10 @@
 	if (len == SZ_4K || len == SZ_64K) {
 
 		if (*fl_pte == 0) {
-			unsigned long *sl;
-			sl = (unsigned long *) __get_free_pages(GFP_KERNEL,
-							get_order(SZ_4K));
-
-			if (!sl) {
-				pr_debug("Could not allocate second level table\n");
+			if (make_second_level(pt, fl_pte) == NULL) {
 				ret = -ENOMEM;
 				goto fail;
 			}
-			memset(sl, 0, SZ_4K);
-			clean_pte(sl, sl + NUM_SL_PTE, pt->redirect);
-
-			*fl_pte = ((((int)__pa(sl)) & FL_BASE_MASK) | \
-						      FL_TYPE_TABLE);
-			clean_pte(fl_pte, fl_pte + 1, pt->redirect);
 		}
 
 		if (!(*fl_pte & FL_TYPE_TABLE)) {
@@ -201,29 +262,16 @@
 	sl_pte = sl_table + sl_offset;
 
 	if (len == SZ_4K) {
-		if (*sl_pte) {
-			ret = -EBUSY;
+		ret = sl_4k(sl_pte, pa, pgprot);
+		if (ret)
 			goto fail;
-		}
-
-		*sl_pte = (pa & SL_BASE_MASK_SMALL) | SL_NG | SL_SHARED
-						| SL_TYPE_SMALL | pgprot;
 		clean_pte(sl_pte, sl_pte + 1, pt->redirect);
 	}
 
 	if (len == SZ_64K) {
-		int i;
-
-		for (i = 0; i < 16; i++)
-			if (*(sl_pte+i)) {
-				ret = -EBUSY;
-				goto fail;
-			}
-
-		for (i = 0; i < 16; i++)
-			*(sl_pte+i) = (pa & SL_BASE_MASK_LARGE) | SL_NG
-					| SL_SHARED | SL_TYPE_LARGE | pgprot;
-
+		ret = sl_64k(sl_pte, pa, pgprot);
+		if (ret)
+			goto fail;
 		clean_pte(sl_pte, sl_pte + 16, pt->redirect);
 	}
 
@@ -322,64 +370,99 @@
 	return pa;
 }
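+/*
+ * The scatterlist mapping below is greedy about block sizes: at each step
+ * it uses the largest block (16MB, 1MB, 64KB or 4KB) to which both the
+ * virtual and physical addresses are aligned and that still fits in the
+ * remainder of the current sg chunk, falling back to 4KB pages otherwise.
+ * Bigger blocks reduce TLB pressure for large, well-aligned buffers.
+ */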
 
+static inline int is_fully_aligned(unsigned int va, phys_addr_t pa, size_t len,
+				   int align)
+{
+	return  IS_ALIGNED(va, align) && IS_ALIGNED(pa, align)
+		&& (len >= align);
+}
+
 int msm_iommu_pagetable_map_range(struct iommu_pt *pt, unsigned int va,
 		       struct scatterlist *sg, unsigned int len, int prot)
 {
 	unsigned int pa;
 	unsigned int offset = 0;
-	unsigned int pgprot;
 	unsigned long *fl_pte;
 	unsigned long fl_offset;
-	unsigned long *sl_table;
+	unsigned long *sl_table = NULL;
 	unsigned long sl_offset, sl_start;
-	unsigned int chunk_offset = 0;
-	unsigned int chunk_pa;
+	unsigned int chunk_size, chunk_offset = 0;
 	int ret = 0;
+	unsigned int pgprot4k, pgprot64k, pgprot1m, pgprot16m;
 
 	BUG_ON(len & (SZ_4K - 1));
 
-	pgprot = __get_pgprot(prot, SZ_4K);
-	if (!pgprot) {
+	pgprot4k = __get_pgprot(prot, SZ_4K);
+	pgprot64k = __get_pgprot(prot, SZ_64K);
+	pgprot1m = __get_pgprot(prot, SZ_1M);
+	pgprot16m = __get_pgprot(prot, SZ_16M);
+	if (!pgprot4k || !pgprot64k || !pgprot1m || !pgprot16m) {
 		ret = -EINVAL;
 		goto fail;
 	}
 
 	fl_offset = FL_OFFSET(va);		/* Upper 12 bits */
 	fl_pte = pt->fl_table + fl_offset;	/* int pointers, 4 bytes */
-
-	sl_table = (unsigned long *) __va(((*fl_pte) & FL_BASE_MASK));
-	sl_offset = SL_OFFSET(va);
-
-	chunk_pa = get_phys_addr(sg);
-	if (chunk_pa == 0) {
-		pr_debug("No dma address for sg %p\n", sg);
-		ret = -EINVAL;
-		goto fail;
-	}
+	pa = get_phys_addr(sg);
 
 	while (offset < len) {
-		/* Set up a 2nd level page table if one doesn't exist */
-		if (*fl_pte == 0) {
-			sl_table = (unsigned long *)
-				 __get_free_pages(GFP_KERNEL, get_order(SZ_4K));
+		chunk_size = SZ_4K;
 
-			if (!sl_table) {
-				pr_debug("Could not allocate second level table\n");
+		if (is_fully_aligned(va, pa, sg->length - chunk_offset,
+				     SZ_16M))
+			chunk_size = SZ_16M;
+		else if (is_fully_aligned(va, pa, sg->length - chunk_offset,
+					  SZ_1M))
+			chunk_size = SZ_1M;
+		/* 64k or 4k determined later */
+
+		/* for 1M and 16M, only first level entries are required */
+		if (chunk_size >= SZ_1M) {
+			if (chunk_size == SZ_16M) {
+				ret = fl_16m(fl_pte, pa, pgprot16m);
+				if (ret)
+					goto fail;
+				clean_pte(fl_pte, fl_pte + 16, pt->redirect);
+				fl_pte += 16;
+			} else if (chunk_size == SZ_1M) {
+				ret = fl_1m(fl_pte, pa, pgprot1m);
+				if (ret)
+					goto fail;
+				clean_pte(fl_pte, fl_pte + 1, pt->redirect);
+				fl_pte++;
+			}
+
+			offset += chunk_size;
+			chunk_offset += chunk_size;
+			va += chunk_size;
+			pa += chunk_size;
+
+			if (chunk_offset >= sg->length && offset < len) {
+				chunk_offset = 0;
+				sg = sg_next(sg);
+				pa = get_phys_addr(sg);
+				if (pa == 0) {
+					pr_debug("No dma address for sg %p\n",
+							sg);
+					ret = -EINVAL;
+					goto fail;
+				}
+			}
+			continue;
+		}
+		/* for 4K or 64K, make sure there is a second level table */
+		if (*fl_pte == 0) {
+			if (!make_second_level(pt, fl_pte)) {
 				ret = -ENOMEM;
 				goto fail;
 			}
-
-			memset(sl_table, 0, SZ_4K);
-			clean_pte(sl_table, sl_table + NUM_SL_PTE,
-					pt->redirect);
-
-			*fl_pte = ((((int)__pa(sl_table)) & FL_BASE_MASK) |
-							    FL_TYPE_TABLE);
-			clean_pte(fl_pte, fl_pte + 1, pt->redirect);
-		} else
-			sl_table = (unsigned long *)
-					       __va(((*fl_pte) & FL_BASE_MASK));
-
+		}
+		if (!(*fl_pte & FL_TYPE_TABLE)) {
+			ret = -EBUSY;
+			goto fail;
+		}
+		sl_table = __va(((*fl_pte) & FL_BASE_MASK));
+		sl_offset = SL_OFFSET(va);
 		/* Keep track of initial position so we
 		 * don't clean more than we have to
 		 */
@@ -387,21 +470,38 @@
 
 		/* Build the 2nd level page table */
 		while (offset < len && sl_offset < NUM_SL_PTE) {
-			pa = chunk_pa + chunk_offset;
-			sl_table[sl_offset] = (pa & SL_BASE_MASK_SMALL) |
-			      pgprot | SL_NG | SL_SHARED | SL_TYPE_SMALL;
-			sl_offset++;
-			offset += SZ_4K;
+			/* Map a large 64K page if the chunk is large enough and
+			 * the pa and va are aligned
+			 */
 
-			chunk_offset += SZ_4K;
+			if (is_fully_aligned(va, pa, sg->length - chunk_offset,
+					     SZ_64K))
+				chunk_size = SZ_64K;
+			else
+				chunk_size = SZ_4K;
+
+			if (chunk_size == SZ_4K) {
+				sl_4k(&sl_table[sl_offset], pa, pgprot4k);
+				sl_offset++;
+			} else {
+				BUG_ON(sl_offset + 16 > NUM_SL_PTE);
+				sl_64k(&sl_table[sl_offset], pa, pgprot64k);
+				sl_offset += 16;
+			}
+
+
+			offset += chunk_size;
+			chunk_offset += chunk_size;
+			va += chunk_size;
+			pa += chunk_size;
 
 			if (chunk_offset >= sg->length && offset < len) {
 				chunk_offset = 0;
 				sg = sg_next(sg);
-				chunk_pa = get_phys_addr(sg);
-				if (chunk_pa == 0) {
+				pa = get_phys_addr(sg);
+				if (pa == 0) {
 					pr_debug("No dma address for sg %p\n",
-						sg);
+							sg);
 					ret = -EINVAL;
 					goto fail;
 				}
@@ -433,44 +533,53 @@
 	fl_offset = FL_OFFSET(va);		/* Upper 12 bits */
 	fl_pte = pt->fl_table + fl_offset;	/* int pointers, 4 bytes */
 
-	sl_start = SL_OFFSET(va);
-
 	while (offset < len) {
-		sl_table = (unsigned long *) __va(((*fl_pte) & FL_BASE_MASK));
-		sl_end = ((len - offset) / SZ_4K) + sl_start;
+		if (*fl_pte & FL_TYPE_TABLE) {
+			sl_start = SL_OFFSET(va);
+			sl_table =  __va(((*fl_pte) & FL_BASE_MASK));
+			sl_end = ((len - offset) / SZ_4K) + sl_start;
 
-		if (sl_end > NUM_SL_PTE)
-			sl_end = NUM_SL_PTE;
+			if (sl_end > NUM_SL_PTE)
+				sl_end = NUM_SL_PTE;
 
-		memset(sl_table + sl_start, 0, (sl_end - sl_start) * 4);
-		clean_pte(sl_table + sl_start, sl_table + sl_end,
-				pt->redirect);
+			memset(sl_table + sl_start, 0, (sl_end - sl_start) * 4);
+			clean_pte(sl_table + sl_start, sl_table + sl_end,
+					pt->redirect);
 
-		offset += (sl_end - sl_start) * SZ_4K;
+			offset += (sl_end - sl_start) * SZ_4K;
+			va += (sl_end - sl_start) * SZ_4K;
 
-		/* Unmap and free the 2nd level table if all mappings in it
-		 * were removed. This saves memory, but the table will need
-		 * to be re-allocated the next time someone tries to map these
-		 * VAs.
-		 */
-		used = 0;
+			/* Unmap and free the 2nd level table if all mappings
+			 * in it were removed. This saves memory, but the table
+			 * will need to be re-allocated the next time someone
+			 * tries to map these VAs.
+			 */
+			used = 0;
 
-		/* If we just unmapped the whole table, don't bother
-		 * seeing if there are still used entries left.
-		 */
-		if (sl_end - sl_start != NUM_SL_PTE)
-			for (i = 0; i < NUM_SL_PTE; i++)
-				if (sl_table[i]) {
-					used = 1;
-					break;
-				}
-		if (!used) {
-			free_page((unsigned long)sl_table);
+			/* If we just unmapped the whole table, don't bother
+			 * seeing if there are still used entries left.
+			 */
+			if (sl_end - sl_start != NUM_SL_PTE)
+				for (i = 0; i < NUM_SL_PTE; i++)
+					if (sl_table[i]) {
+						used = 1;
+						break;
+					}
+			if (!used) {
+				free_page((unsigned long)sl_table);
+				*fl_pte = 0;
+
+				clean_pte(fl_pte, fl_pte + 1, pt->redirect);
+			}
+
+			sl_start = 0;
+		} else {
 			*fl_pte = 0;
 			clean_pte(fl_pte, fl_pte + 1, pt->redirect);
+			va += SZ_1M;
+			offset += SZ_1M;
+			sl_start = 0;
 		}
-
-		sl_start = 0;
 		fl_pte++;
 	}
 }
diff --git a/drivers/iommu/msm_iommu_perfmon-v0.c b/drivers/iommu/msm_iommu_perfmon-v0.c
new file mode 100644
index 0000000..c80d1e5
--- /dev/null
+++ b/drivers/iommu/msm_iommu_perfmon-v0.c
@@ -0,0 +1,310 @@
+/* Copyright (c) 2013, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+/**
+ * This file contains the part of the IOMMUv0 PMU driver that actually touches
+ * IOMMU PMU registers.
+ */
+
+#include <linux/io.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <mach/iommu_hw-v0.h>
+#include <mach/iommu_perfmon.h>
+
+#define PM_RESET_MASK		(0xF)
+#define PM_RESET_SHIFT		(0x8)
+#define PM_RESET		(PM_RESET_MASK << PM_RESET_SHIFT)
+
+#define PM_ENABLE_MASK		(0x1)
+#define PM_ENABLE_SHIFT		(0x0)
+#define PM_ENABLE		(PM_ENABLE_MASK << PM_ENABLE_SHIFT)
+
+#define PM_OVFL_FLAG_MASK	(0xF)
+#define PM_OVFL_FLAG_SHIFT	(0x0)
+#define PM_OVFL_FLAG		(PM_OVFL_FLAG_MASK << PM_OVFL_FLAG_SHIFT)
+
+#define PM_EVENT_TYPE_MASK	(0x1F)
+#define PM_EVENT_TYPE_SHIFT	(0x2)
+#define PM_EVENT_TYPE		(PM_EVENT_TYPE_MASK << PM_EVENT_TYPE_SHIFT)
+
+#define PM_INT_EN_MASK		(0x1)
+#define PM_INT_EN_SHIFT		(0x0)
+#define PM_INT_EN		(PM_INT_EN_MASK << PM_INT_EN_SHIFT)
+
+#define PM_INT_POL_MASK		(0x1)
+#define PM_INT_POL_SHIFT	(0x2)
+#define PM_INT_ACTIVE_HIGH	(0x1)
+
+#define PMEVCNTR_(n)		(EMC_N + n*4)
+#define PMEVTYPER_(n)		(EMCC_N + n*4)
+
+/**
+ * Translate between SMMUv0 event classes and standard ARM SMMU event classes
+ */
+static int iommu_pm_event_class_translation_table[] = {
+	MSM_IOMMU_PMU_NO_EVENT_CLASS,
+	MSM_IOMMU_PMU_NO_EVENT_CLASS,
+	MSM_IOMMU_PMU_NO_EVENT_CLASS,
+	0x8,
+	0x9,
+	MSM_IOMMU_PMU_NO_EVENT_CLASS,
+	0x80,
+	MSM_IOMMU_PMU_NO_EVENT_CLASS,
+	0x12,
+	MSM_IOMMU_PMU_NO_EVENT_CLASS,
+	MSM_IOMMU_PMU_NO_EVENT_CLASS,
+	MSM_IOMMU_PMU_NO_EVENT_CLASS,
+	MSM_IOMMU_PMU_NO_EVENT_CLASS,
+	MSM_IOMMU_PMU_NO_EVENT_CLASS,
+	MSM_IOMMU_PMU_NO_EVENT_CLASS,
+	0x10,
+};
+
+static int iommu_pm_translate_event_class(int event_class)
+{
+	const unsigned int TBL_LEN =
+			ARRAY_SIZE(iommu_pm_event_class_translation_table);
+	unsigned int i;
+
+	if (event_class < 0)
+		return event_class;
+
+	for (i = 0; i < TBL_LEN; ++i) {
+		if (iommu_pm_event_class_translation_table[i] == event_class)
+			return i;
+	}
+	return MSM_IOMMU_PMU_NO_EVENT_CLASS;
+}
+
+static unsigned int iommu_pm_is_hw_access_OK(const struct iommu_pmon *pmon)
+{
+	/*
+	 * IOMMUv0 is in an always-on domain, so we don't care whether we
+	 * are attached or not. We only care whether the PMU is enabled,
+	 * i.e. whether its clocks are turned on.
+	 */
+	return pmon->enabled;
+}
+
+static void iommu_pm_grp_enable(struct iommu_info *iommu, unsigned int grp_no)
+{
+	/* No group concept in v0. */
+}
+
+static void iommu_pm_grp_disable(struct iommu_info *iommu, unsigned int grp_no)
+{
+	/* No group concept in v0. */
+}
+
+static void iommu_pm_set_int_active_high(const struct iommu_info *iommu)
+{
+	unsigned int emmc;
+	emmc = readl_relaxed(iommu->base + EMMC);
+	emmc |= (PM_INT_ACTIVE_HIGH & PM_INT_POL_MASK) << PM_INT_POL_SHIFT;
+	writel_relaxed(emmc, iommu->base + EMMC);
+}
+
+static void iommu_pm_enable(struct iommu_info *iommu)
+{
+	unsigned int emmc;
+	emmc = readl_relaxed(iommu->base + EMMC);
+	emmc |= PM_ENABLE;
+	writel_relaxed(emmc, iommu->base + EMMC);
+}
+
+static void iommu_pm_disable(struct iommu_info *iommu)
+{
+	unsigned int emmc;
+	emmc = readl_relaxed(iommu->base + EMMC);
+	emmc &= ~PM_ENABLE;
+	writel_relaxed(emmc, iommu->base + EMMC);
+}
+
+static void iommu_pm_reset_counters(const struct iommu_info *iommu)
+{
+	unsigned int emmc;
+	emmc = readl_relaxed(iommu->base + EMMC);
+	emmc |= PM_RESET;
+	writel_relaxed(emmc, iommu->base + EMMC);
+}
+
+static void iommu_pm_check_for_overflow(struct iommu_pmon *pmon)
+{
+	struct iommu_pmon_counter *counter;
+	struct iommu_info *iommu = &pmon->iommu;
+	unsigned int reg_value;
+	unsigned int j;
+	struct iommu_pmon_cnt_group *cnt_grp = &pmon->cnt_grp[0];
+
+	reg_value = readl_relaxed(iommu->base + EMCS);
+	reg_value &= PM_OVFL_FLAG;
+
+	for (j = 0; j < cnt_grp->num_counters; ++j) {
+		counter = &cnt_grp->counters[j];
+
+		if (counter->enabled) {
+			if (reg_value & (1 << counter->absolute_counter_no))
+				counter->overflow_count++;
+		}
+	}
+
+	/* Clear overflow */
+	writel_relaxed(reg_value, iommu->base + EMCS);
+}
+
+static irqreturn_t iommu_pm_evt_ovfl_int_handler(int irq, void *dev_id)
+{
+	struct iommu_pmon *pmon = dev_id;
+	struct iommu_info *iommu = &pmon->iommu;
+
+	mutex_lock(&pmon->lock);
+
+	if (!iommu_pm_is_hw_access_OK(pmon)) {
+		mutex_unlock(&pmon->lock);
+		goto out;
+	}
+
+	iommu->ops->iommu_lock_acquire();
+	iommu_pm_check_for_overflow(pmon);
+	iommu->ops->iommu_lock_release();
+
+	mutex_unlock(&pmon->lock);
+
+out:
+	return IRQ_HANDLED;
+}
+
+static void iommu_pm_counter_enable(struct iommu_info *iommu,
+				    struct iommu_pmon_counter *counter)
+{
+	unsigned int bit_no = counter->absolute_counter_no;
+	unsigned int reg_value;
+
+	/* Clear overflow of counter */
+	reg_value = readl_relaxed(iommu->base + EMCS);
+	reg_value &= (1 << bit_no);
+	writel_relaxed(reg_value, iommu->base + EMCS);
+
+	/* Enable counter */
+	counter->enabled = 1;
+}
+
+static void iommu_pm_counter_disable(struct iommu_info *iommu,
+				     struct iommu_pmon_counter *counter)
+{
+	unsigned int bit_no = counter->absolute_counter_no;
+	unsigned int reg_value;
+
+	/* Disable counter */
+	counter->enabled = 0;
+
+	/* Clear overflow of counter */
+	reg_value = readl_relaxed(iommu->base + EMCS);
+	reg_value &= (1 << bit_no);
+	writel_relaxed(reg_value, iommu->base + EMCS);
+}
+
+/*
+ * Must be called after iommu_start_access() is called
+ */
+static void iommu_pm_ovfl_int_enable(struct iommu_info *iommu,
+				     const struct iommu_pmon_counter *counter)
+{
+	unsigned int reg_no = counter->absolute_counter_no;
+	unsigned int reg_value;
+
+	/* Enable overflow interrupt for counter */
+	reg_value = readl_relaxed(iommu->base + PMEVTYPER_(reg_no));
+	reg_value |= PM_INT_EN;
+	writel_relaxed(reg_value, iommu->base + PMEVTYPER_(reg_no));
+}
+
+/*
+ * Must be called after iommu_start_access() is called
+ */
+static void iommu_pm_ovfl_int_disable(struct iommu_info *iommu,
+				      const struct iommu_pmon_counter *counter)
+{
+	unsigned int reg_no = counter->absolute_counter_no;
+	unsigned int reg_value;
+
+	/* Disable overflow interrupt for counter */
+	reg_value = readl_relaxed(iommu->base + PMEVTYPER_(reg_no));
+	reg_value &= ~PM_INT_EN;
+	writel_relaxed(reg_value, iommu->base + PMEVTYPER_(reg_no));
+}
+
+static void iommu_pm_set_event_class(struct iommu_pmon *pmon,
+				    unsigned int count_no,
+				    unsigned int event_class)
+{
+	unsigned int reg_no = count_no;
+	unsigned int reg_value;
+	int event = iommu_pm_translate_event_class(event_class);
+
+	if (event == MSM_IOMMU_PMU_NO_EVENT_CLASS)
+		event = 0;
+
+	reg_value = readl_relaxed(pmon->iommu.base + PMEVTYPER_(reg_no));
+	reg_value &= ~(PM_EVENT_TYPE_MASK << PM_EVENT_TYPE_SHIFT);
+	reg_value |= (event & PM_EVENT_TYPE_MASK) << PM_EVENT_TYPE_SHIFT;
+	writel_relaxed(reg_value, pmon->iommu.base + PMEVTYPER_(reg_no));
+}
+
+static unsigned int iommu_pm_read_counter(struct iommu_pmon_counter *counter)
+{
+	struct iommu_pmon *pmon = counter->cnt_group->pmon;
+	struct iommu_info *info = &pmon->iommu;
+	unsigned int cnt_no = counter->absolute_counter_no;
+	return readl_relaxed(info->base + PMEVCNTR_(cnt_no));
+}
+
+static void iommu_pm_initialize_hw(const struct iommu_pmon *pmon)
+{
+	const struct iommu_info *iommu = &pmon->iommu;
+	struct msm_iommu_drvdata *iommu_drvdata =
+					dev_get_drvdata(iommu->iommu_dev);
+
+	/* This is called during bootup device initialization so no need
+	 * for locking here.
+	 */
+	iommu->ops->iommu_power_on(iommu_drvdata);
+	iommu_pm_set_int_active_high(iommu);
+	iommu->ops->iommu_power_off(iommu_drvdata);
+}
+
+static struct iommu_pm_hw_ops iommu_pm_hw_ops = {
+	.initialize_hw = iommu_pm_initialize_hw,
+	.is_hw_access_OK = iommu_pm_is_hw_access_OK,
+	.grp_enable = iommu_pm_grp_enable,
+	.grp_disable = iommu_pm_grp_disable,
+	.enable_pm = iommu_pm_enable,
+	.disable_pm = iommu_pm_disable,
+	.reset_counters = iommu_pm_reset_counters,
+	.check_for_overflow = iommu_pm_check_for_overflow,
+	.evt_ovfl_int_handler = iommu_pm_evt_ovfl_int_handler,
+	.counter_enable = iommu_pm_counter_enable,
+	.counter_disable = iommu_pm_counter_disable,
+	.ovfl_int_enable = iommu_pm_ovfl_int_enable,
+	.ovfl_int_disable = iommu_pm_ovfl_int_disable,
+	.set_event_class = iommu_pm_set_event_class,
+	.read_counter = iommu_pm_read_counter,
+};
+
+struct iommu_pm_hw_ops *iommu_pm_get_hw_ops_v0(void)
+{
+	return &iommu_pm_hw_ops;
+}
+EXPORT_SYMBOL(iommu_pm_get_hw_ops_v0);
+
diff --git a/drivers/iommu/msm_iommu_perfmon-v1.c b/drivers/iommu/msm_iommu_perfmon-v1.c
new file mode 100644
index 0000000..d76ee7f
--- /dev/null
+++ b/drivers/iommu/msm_iommu_perfmon-v1.c
@@ -0,0 +1,269 @@
+/* Copyright (c) 2013, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+/**
+ * This file contains the part of the IOMMUv1 PMU driver that actually touches
+ * IOMMU PMU registers.
+ */
+
+#include <linux/io.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <mach/iommu_hw-v1.h>
+#include <mach/iommu_perfmon.h>
+
+#define PMCR_P_MASK		(0x1)
+#define PMCR_P_SHIFT		(1)
+#define PMCR_P			(PMCR_P_MASK << PMCR_P_SHIFT)
+#define PMCFGR_NCG_MASK		(0xFF)
+#define PMCFGR_NCG_SHIFT	(24)
+#define PMCFGR_NCG		(PMCFGR_NCG_MASK << PMCFGR_NCG_SHIFT)
+#define PMCFGR_N_MASK		(0xFF)
+#define PMCFGR_N_SHIFT		(0)
+#define PMCFGR_N		(PMCFGR_N_MASK << PMCFGR_N_SHIFT)
+#define CR_E			0x1
+#define CGCR_CEN		0x800
+#define CGCR_CEN_SHFT		(1 << 11)
+#define PMCGCR_CGNC_MASK	(0x0F)
+#define PMCGCR_CGNC_SHIFT	(24)
+#define PMCGCR_CGNC		(PMCGCR_CGNC_MASK << PMCGCR_CGNC_SHIFT)
+#define PMCGCR_(group)		(PMCGCR_N + group*4)
+
+#define PMOVSCLR_(n)		(PMOVSCLR_N + n*4)
+#define PMCNTENSET_(n)		(PMCNTENSET_N + n*4)
+#define PMCNTENCLR_(n)		(PMCNTENCLR_N + n*4)
+#define PMINTENSET_(n)		(PMINTENSET_N + n*4)
+#define PMINTENCLR_(n)		(PMINTENCLR_N + n*4)
+
+#define PMEVCNTR_(n)		(PMEVCNTR_N + n*4)
+#define PMEVTYPER_(n)		(PMEVTYPER_N + n*4)
+
+
+static unsigned int iommu_pm_is_hw_access_OK(const struct iommu_pmon *pmon)
+{
+	/*
+	 * IOMMUv1 is not in the always-on domain, so the regulators must be
+	 * turned on in addition to the clocks before we may access the
+	 * hardware. Thus we check that we have attached to the IOMMU in
+	 * addition to checking that the PMU is enabled.
+	 */
+	return pmon->enabled && (pmon->iommu_attach_count > 0);
+}
+
+static void iommu_pm_grp_enable(struct iommu_info *iommu, unsigned int grp_no)
+{
+	unsigned int pmcgcr;
+	pmcgcr = readl_relaxed(iommu->base + PMCGCR_(grp_no));
+	pmcgcr |= CGCR_CEN;
+	writel_relaxed(pmcgcr, iommu->base + PMCGCR_(grp_no));
+}
+
+static void iommu_pm_grp_disable(struct iommu_info *iommu, unsigned int grp_no)
+{
+	unsigned int pmcgcr;
+	pmcgcr = readl_relaxed(iommu->base + PMCGCR_(grp_no));
+	pmcgcr &= ~CGCR_CEN;
+	writel_relaxed(pmcgcr, iommu->base + PMCGCR_(grp_no));
+}
+
+static void iommu_pm_enable(struct iommu_info *iommu)
+{
+	unsigned int pmcr;
+	pmcr = readl_relaxed(iommu->base + PMCR);
+	pmcr |= CR_E;
+	writel_relaxed(pmcr, iommu->base + PMCR);
+}
+
+static void iommu_pm_disable(struct iommu_info *iommu)
+{
+	unsigned int pmcr;
+	pmcr = readl_relaxed(iommu->base + PMCR);
+	pmcr &= ~CR_E;
+	writel_relaxed(pmcr, iommu->base + PMCR);
+}
+
+static void iommu_pm_reset_counters(const struct iommu_info *iommu)
+{
+	unsigned int pmcr;
+	pmcr = readl_relaxed(iommu->base + PMCR);
+	pmcr |= PMCR_P;
+	writel_relaxed(pmcr, iommu->base + PMCR);
+}
+
+static void iommu_pm_check_for_overflow(struct iommu_pmon *pmon)
+{
+	struct iommu_pmon_counter *counter;
+	struct iommu_info *iommu = &pmon->iommu;
+	unsigned int reg_no = 0;
+	unsigned int bit_no;
+	unsigned int reg_value;
+	unsigned int i;
+	unsigned int j;
+	unsigned int curr_reg = 0;
+
+	reg_value = readl_relaxed(iommu->base + PMOVSCLR_(curr_reg));
+
+	for (i = 0; i < pmon->num_groups; ++i) {
+		struct iommu_pmon_cnt_group *cnt_grp = &pmon->cnt_grp[i];
+		for (j = 0; j < cnt_grp->num_counters; ++j) {
+			counter = &cnt_grp->counters[j];
+			reg_no = counter->absolute_counter_no / 32;
+			bit_no = counter->absolute_counter_no % 32;
+			if (reg_no != curr_reg) {
+				/* Clear overflow bits */
+				writel_relaxed(reg_value, iommu->base +
+					       PMOVSCLR_(reg_no));
+				curr_reg = reg_no;
+				reg_value = readl_relaxed(iommu->base +
+							  PMOVSCLR_(curr_reg));
+			}
+
+			if (counter->enabled) {
+				if (reg_value & (1 << bit_no))
+					counter->overflow_count++;
+			}
+		}
+	}
+
+	/* Clear overflow */
+	writel_relaxed(reg_value, iommu->base + PMOVSCLR_(reg_no));
+}
+
+static irqreturn_t iommu_pm_evt_ovfl_int_handler(int irq, void *dev_id)
+{
+	struct iommu_pmon *pmon = dev_id;
+	struct iommu_info *iommu = &pmon->iommu;
+
+	mutex_lock(&pmon->lock);
+
+	if (!iommu_pm_is_hw_access_OK(pmon)) {
+		mutex_unlock(&pmon->lock);
+		goto out;
+	}
+
+	iommu->ops->iommu_lock_acquire();
+	iommu_pm_check_for_overflow(pmon);
+	iommu->ops->iommu_lock_release();
+
+	mutex_unlock(&pmon->lock);
+
+out:
+	return IRQ_HANDLED;
+}
+
+static void iommu_pm_counter_enable(struct iommu_info *iommu,
+				    struct iommu_pmon_counter *counter)
+{
+	unsigned int reg_no = counter->absolute_counter_no / 32;
+	unsigned int bit_no = counter->absolute_counter_no % 32;
+	unsigned int reg_value;
+
+	/* Clear overflow of counter */
+	reg_value = 1 << bit_no;
+	writel_relaxed(reg_value, iommu->base + PMOVSCLR_(reg_no));
+
+	/* Enable counter */
+	writel_relaxed(reg_value, iommu->base + PMCNTENSET_(reg_no));
+	counter->enabled = 1;
+}
+
+static void iommu_pm_counter_disable(struct iommu_info *iommu,
+				     struct iommu_pmon_counter *counter)
+{
+	unsigned int reg_no = counter->absolute_counter_no / 32;
+	unsigned int bit_no = counter->absolute_counter_no % 32;
+	unsigned int reg_value;
+
+	counter->enabled = 0;
+
+	/* Disable counter */
+	reg_value = 1 << bit_no;
+	writel_relaxed(reg_value, iommu->base + PMCNTENCLR_(reg_no));
+
+	/* Clear overflow of counter */
+	writel_relaxed(reg_value, iommu->base + PMOVSCLR_(reg_no));
+}
+
+/*
+ * Must be called after iommu_start_access() is called
+ */
+static void iommu_pm_ovfl_int_enable(struct iommu_info *iommu,
+				     const struct iommu_pmon_counter *counter)
+{
+	unsigned int reg_no = counter->absolute_counter_no / 32;
+	unsigned int bit_no = counter->absolute_counter_no % 32;
+	unsigned int reg_value;
+
+	/* Enable overflow interrupt for counter */
+	reg_value = (1 << bit_no);
+	writel_relaxed(reg_value, iommu->base + PMINTENSET_(reg_no));
+}
+
+/*
+ * Must be called after iommu_start_access() is called
+ */
+static void iommu_pm_ovfl_int_disable(struct iommu_info *iommu,
+				      const struct iommu_pmon_counter *counter)
+{
+	unsigned int reg_no = counter->absolute_counter_no / 32;
+	unsigned int bit_no = counter->absolute_counter_no % 32;
+	unsigned int reg_value;
+
+	/* Disable overflow interrupt for counter */
+	reg_value = 1 << bit_no;
+	writel_relaxed(reg_value, iommu->base + PMINTENCLR_(reg_no));
+}
+
+static void iommu_pm_set_event_class(struct iommu_pmon *pmon,
+				    unsigned int count_no,
+				    unsigned int event_class)
+{
+	writel_relaxed(event_class, pmon->iommu.base + PMEVTYPER_(count_no));
+}
+
+static unsigned int iommu_pm_read_counter(struct iommu_pmon_counter *counter)
+{
+	struct iommu_pmon *pmon = counter->cnt_group->pmon;
+	struct iommu_info *info = &pmon->iommu;
+	unsigned int cnt_no = counter->absolute_counter_no;
+	return readl_relaxed(info->base + PMEVCNTR_(cnt_no));
+}
+
+static void iommu_pm_initialize_hw(const struct iommu_pmon *pmon)
+{
+	/* No initialization needed */
+}
+
+static struct iommu_pm_hw_ops iommu_pm_hw_ops = {
+	.initialize_hw = iommu_pm_initialize_hw,
+	.is_hw_access_OK = iommu_pm_is_hw_access_OK,
+	.grp_enable = iommu_pm_grp_enable,
+	.grp_disable = iommu_pm_grp_disable,
+	.enable_pm = iommu_pm_enable,
+	.disable_pm = iommu_pm_disable,
+	.reset_counters = iommu_pm_reset_counters,
+	.check_for_overflow = iommu_pm_check_for_overflow,
+	.evt_ovfl_int_handler = iommu_pm_evt_ovfl_int_handler,
+	.counter_enable = iommu_pm_counter_enable,
+	.counter_disable = iommu_pm_counter_disable,
+	.ovfl_int_enable = iommu_pm_ovfl_int_enable,
+	.ovfl_int_disable = iommu_pm_ovfl_int_disable,
+	.set_event_class = iommu_pm_set_event_class,
+	.read_counter = iommu_pm_read_counter,
+};
+
+struct iommu_pm_hw_ops *iommu_pm_get_hw_ops_v1(void)
+{
+	return &iommu_pm_hw_ops;
+}
+EXPORT_SYMBOL(iommu_pm_get_hw_ops_v1);
+
diff --git a/drivers/iommu/msm_iommu_perfmon.c b/drivers/iommu/msm_iommu_perfmon.c
index 97bd660..41df1ed 100644
--- a/drivers/iommu/msm_iommu_perfmon.c
+++ b/drivers/iommu/msm_iommu_perfmon.c
@@ -20,40 +20,12 @@
 #include <linux/interrupt.h>
 #include <linux/bitops.h>
 #include <linux/debugfs.h>
-#include <mach/iommu_hw-v1.h>
 #include <mach/iommu.h>
 #include <mach/iommu_perfmon.h>
 
-#define PMCR_P_MASK		(0x1)
-#define PMCR_P_SHIFT		(1)
-#define PMCR_P			(PMCR_P_MASK << PMCR_P_SHIFT)
-#define PMCFGR_NCG_MASK		(0xFF)
-#define PMCFGR_NCG_SHIFT	(24)
-#define PMCFGR_NCG		(PMCFGR_NCG_MASK << PMCFGR_NCG_SHIFT)
-#define PMCFGR_N_MASK		(0xFF)
-#define PMCFGR_N_SHIFT		(0)
-#define PMCFGR_N		(PMCFGR_N_MASK << PMCFGR_N_SHIFT)
-#define CR_E			0x1
-#define CGCR_CEN		0x800
-#define CGCR_CEN_SHFT		(1 << 11)
-#define PMCGCR_CGNC_MASK	(0x0F)
-#define PMCGCR_CGNC_SHIFT	(24)
-#define PMCGCR_CGNC		(PMCGCR_CGNC_MASK << PMCGCR_CGNC_SHIFT)
-#define PMCGCR_(group)		(PMCGCR_N + group*4)
-
-#define PMOVSCLR_(n)		(PMOVSCLR_N + n*4)
-#define PMCNTENSET_(n)		(PMCNTENSET_N + n*4)
-#define PMCNTENCLR_(n)		(PMCNTENCLR_N + n*4)
-#define PMINTENSET_(n)		(PMINTENSET_N + n*4)
-#define PMINTENCLR_(n)		(PMINTENCLR_N + n*4)
-
-#define PMEVCNTR_(n)		(PMEVCNTR_N + n*4)
-#define PMEVTYPER_(n)		(PMEVTYPER_N + n*4)
-
 static LIST_HEAD(iommu_list);
 static struct dentry *msm_iommu_root_debugfs_dir;
 static const char *NO_EVENT_CLASS_NAME = "none";
-static int NO_EVENT_CLASS = -1;
 static const unsigned int MAX_EVEN_CLASS_NAME_LEN = 36;
 
 struct event_class {
@@ -81,11 +53,6 @@
 	{ 0xb1, "tot_num_pred_axi_htw_read_req" },
 };
 
-static unsigned int iommu_pm_is_hw_access_OK(const struct iommu_pmon *pmon)
-{
-	return pmon->enabled && (pmon->iommu_attach_count > 0);
-}
-
 static unsigned int iommu_pm_create_sup_cls_str(char **buf,
 						struct iommu_pmon *pmon)
 {
@@ -151,7 +118,7 @@
 	size_t array_len;
 	struct event_class *ptr;
 	int i;
-	int event_class = NO_EVENT_CLASS;
+	int event_class = MSM_IOMMU_PMU_NO_EVENT_CLASS;
 
 	if (strcmp(event_class_name, NO_EVENT_CLASS_NAME) == 0)
 		goto out;
@@ -194,170 +161,6 @@
 	return NULL;
 }
 
-static void iommu_pm_grp_enable(struct iommu_info *iommu, unsigned int grp_no)
-{
-	unsigned int pmcgcr;
-	pmcgcr = readl_relaxed(iommu->base + PMCGCR_(grp_no));
-	pmcgcr |= CGCR_CEN;
-	writel_relaxed(pmcgcr, iommu->base + PMCGCR_(grp_no));
-}
-
-static void iommu_pm_grp_disable(struct iommu_info *iommu, unsigned int grp_no)
-{
-	unsigned int pmcgcr;
-	pmcgcr = readl_relaxed(iommu->base + PMCGCR_(grp_no));
-	pmcgcr &= ~CGCR_CEN;
-	writel_relaxed(pmcgcr, iommu->base + PMCGCR_(grp_no));
-}
-
-static void iommu_pm_enable(struct iommu_info *iommu)
-{
-	unsigned int pmcr;
-	pmcr = readl_relaxed(iommu->base + PMCR);
-	pmcr |= CR_E;
-	writel_relaxed(pmcr, iommu->base + PMCR);
-}
-
-static void iommu_pm_disable(struct iommu_info *iommu)
-{
-	unsigned int pmcr;
-	pmcr = readl_relaxed(iommu->base + PMCR);
-	pmcr &= ~CR_E;
-	writel_relaxed(pmcr, iommu->base + PMCR);
-}
-
-static void iommu_pm_reset_counters(const struct iommu_info *iommu)
-{
-	unsigned int pmcr;
-	pmcr = readl_relaxed(iommu->base + PMCR);
-	pmcr |= PMCR_P;
-	writel_relaxed(pmcr, iommu->base + PMCR);
-}
-
-static void iommu_pm_check_for_overflow(struct iommu_pmon *pmon)
-{
-	struct iommu_pmon_counter *counter;
-	struct iommu_info *iommu = &pmon->iommu;
-	unsigned int reg_no = 0;
-	unsigned int bit_no;
-	unsigned int reg_value;
-	unsigned int i;
-	unsigned int j;
-	unsigned int curr_reg = 0;
-
-	reg_value = readl_relaxed(iommu->base + PMOVSCLR_(curr_reg));
-
-	for (i = 0; i < pmon->num_groups; ++i) {
-		struct iommu_pmon_cnt_group *cnt_grp = &pmon->cnt_grp[i];
-		for (j = 0; j < cnt_grp->num_counters; ++j) {
-			counter = &cnt_grp->counters[j];
-			reg_no = counter->absolute_counter_no / 32;
-			bit_no = counter->absolute_counter_no % 32;
-			if (reg_no != curr_reg) {
-				/* Clear overflow bits */
-				writel_relaxed(reg_value, iommu->base +
-					       PMOVSCLR_(reg_no));
-				curr_reg = reg_no;
-				reg_value = readl_relaxed(iommu->base +
-							  PMOVSCLR_(curr_reg));
-			}
-
-			if (counter->enabled) {
-				if (reg_value & (1 << bit_no))
-					counter->overflow_count++;
-			}
-		}
-	}
-
-	/* Clear overflow */
-	writel_relaxed(reg_value, iommu->base + PMOVSCLR_(reg_no));
-}
-
-irqreturn_t iommu_pm_evt_ovfl_int_handler(int irq, void *dev_id)
-{
-	struct iommu_pmon *pmon = dev_id;
-	struct iommu_info *iommu = &pmon->iommu;
-
-	mutex_lock(&pmon->lock);
-
-	if (!iommu_pm_is_hw_access_OK(pmon)) {
-		mutex_unlock(&pmon->lock);
-		goto out;
-	}
-
-	iommu->ops->iommu_lock_acquire();
-	iommu_pm_check_for_overflow(pmon);
-	iommu->ops->iommu_lock_release();
-
-	mutex_unlock(&pmon->lock);
-
-out:
-	return IRQ_HANDLED;
-}
-
-static void iommu_pm_counter_enable(struct iommu_info *iommu,
-				    struct iommu_pmon_counter *counter)
-{
-	unsigned int reg_no = counter->absolute_counter_no / 32;
-	unsigned int bit_no = counter->absolute_counter_no % 32;
-	unsigned int reg_value;
-
-	/* Clear overflow of counter */
-	reg_value = 1 << bit_no;
-	writel_relaxed(reg_value, iommu->base + PMOVSCLR_(reg_no));
-
-	/* Enable counter */
-	writel_relaxed(reg_value, iommu->base + PMCNTENSET_(reg_no));
-	counter->enabled = 1;
-}
-
-static void iommu_pm_counter_disable(struct iommu_info *iommu,
-				     struct iommu_pmon_counter *counter)
-{
-	unsigned int reg_no = counter->absolute_counter_no / 32;
-	unsigned int bit_no = counter->absolute_counter_no % 32;
-	unsigned int reg_value;
-
-	counter->enabled = 0;
-
-	/* Disable counter */
-	reg_value = 1 << bit_no;
-	writel_relaxed(reg_value, iommu->base + PMCNTENCLR_(reg_no));
-
-	/* Clear overflow of counter */
-	writel_relaxed(reg_value, iommu->base + PMOVSCLR_(reg_no));
-}
-
-/*
- * Must be called after iommu_start_access() is called
- */
-static void iommu_pm_ovfl_int_enable(struct iommu_info *iommu,
-				     const struct iommu_pmon_counter *counter)
-{
-	unsigned int reg_no = counter->absolute_counter_no / 32;
-	unsigned int bit_no = counter->absolute_counter_no % 32;
-	unsigned int reg_value;
-
-	/* Enable overflow interrupt for counter */
-	reg_value = (1 << bit_no);
-	writel_relaxed(reg_value, iommu->base + PMINTENSET_(reg_no));
-}
-
-/*
- * Must be called after iommu_start_access() is called
- */
-static void iommu_pm_ovfl_int_disable(struct iommu_info *iommu,
-				      const struct iommu_pmon_counter *counter)
-{
-	unsigned int reg_no = counter->absolute_counter_no / 32;
-	unsigned int bit_no = counter->absolute_counter_no % 32;
-	unsigned int reg_value;
-
-	/* Disable overflow interrupt for counter */
-	reg_value = 1 << bit_no;
-	writel_relaxed(reg_value, iommu->base + PMINTENCLR_(reg_no));
-}
-
 static void iommu_pm_set_event_type(struct iommu_pmon *pmon,
 				    struct iommu_pmon_counter *counter)
 {
@@ -368,12 +171,12 @@
 	event_class = counter->current_event_class;
 	count_no = counter->absolute_counter_no;
 
-	if (event_class == NO_EVENT_CLASS) {
-		if (iommu_pm_is_hw_access_OK(pmon)) {
+	if (event_class == MSM_IOMMU_PMU_NO_EVENT_CLASS) {
+		if (iommu->hw_ops->is_hw_access_OK(pmon)) {
 			iommu->ops->iommu_lock_acquire();
-			iommu_pm_counter_disable(iommu, counter);
-			iommu_pm_ovfl_int_disable(iommu, counter);
-			writel_relaxed(0, iommu->base + PMEVTYPER_(count_no));
+			iommu->hw_ops->counter_disable(iommu, counter);
+			iommu->hw_ops->ovfl_int_disable(iommu, counter);
+			iommu->hw_ops->set_event_class(pmon, count_no, 0);
 			iommu->ops->iommu_lock_release();
 		}
 		counter->overflow_count = 0;
@@ -381,12 +184,12 @@
 	} else {
 		counter->overflow_count = 0;
 		counter->value = 0;
-		if (iommu_pm_is_hw_access_OK(pmon)) {
+		if (iommu->hw_ops->is_hw_access_OK(pmon)) {
 			iommu->ops->iommu_lock_acquire();
-			writel_relaxed(event_class,
-					iommu->base + PMEVTYPER_(count_no));
-			iommu_pm_ovfl_int_enable(iommu, counter);
-			iommu_pm_counter_enable(iommu, counter);
+			iommu->hw_ops->set_event_class(pmon, count_no,
+					event_class);
+			iommu->hw_ops->ovfl_int_enable(iommu, counter);
+			iommu->hw_ops->counter_enable(iommu, counter);
 			iommu->ops->iommu_lock_release();
 		}
 	}
@@ -405,19 +208,6 @@
 	}
 }
 
-static unsigned int iommu_pm_read_counter(struct iommu_pmon_counter *counter)
-{
-	struct iommu_pmon *pmon = counter->cnt_group->pmon;
-	struct iommu_info *info = &pmon->iommu;
-	unsigned int cnt_no = counter->absolute_counter_no;
-	unsigned int pmevcntr;
-
-	pmevcntr = readl_relaxed(info->base + PMEVCNTR_(cnt_no));
-
-	return pmevcntr;
-
-}
-
 static void iommu_pm_set_all_counters(struct iommu_pmon *pmon)
 {
 	unsigned int i;
@@ -433,12 +223,13 @@
 {
 	unsigned int i;
 	unsigned int j;
+	struct iommu_info *iommu = &pmon->iommu;
 	for (i = 0; i < pmon->num_groups; ++i) {
 		struct iommu_pmon_cnt_group *cnt_grp = &pmon->cnt_grp[i];
 		for (j = 0; j < cnt_grp->num_counters; ++j) {
 			struct iommu_pmon_counter *counter;
 			counter = &cnt_grp->counters[j];
-			counter->value = iommu_pm_read_counter(counter);
+			counter->value = iommu->hw_ops->read_counter(counter);
 		}
 	}
 }
@@ -452,6 +243,12 @@
 
 	iommu->ops->iommu_power_on(iommu_drvdata);
 
+	/* Reset counters in HW */
+	iommu->ops->iommu_lock_acquire();
+	iommu->hw_ops->reset_counters(&pmon->iommu);
+	iommu->ops->iommu_lock_release();
+
+	/* Reset SW counters */
 	iommu_pm_reset_counts(pmon);
 
 	pmon->enabled = 1;
@@ -462,10 +259,10 @@
 
 	/* enable all counter group */
 	for (i = 0; i < pmon->num_groups; ++i)
-		iommu_pm_grp_enable(iommu, i);
+		iommu->hw_ops->grp_enable(iommu, i);
 
 	/* enable global counters */
-	iommu_pm_enable(iommu);
+	iommu->hw_ops->enable_pm(iommu);
 	iommu->ops->iommu_lock_release();
 
 	pr_info("%s: TLB performance monitoring turned ON\n",
@@ -484,14 +281,14 @@
 	iommu->ops->iommu_lock_acquire();
 
 	/* disable global counters */
-	iommu_pm_disable(iommu);
+	iommu->hw_ops->disable_pm(iommu);
 
 	/* Check if we overflowed just before turning off pmon */
-	iommu_pm_check_for_overflow(pmon);
+	iommu->hw_ops->check_for_overflow(pmon);
 
 	/* disable all counter group */
 	for (i = 0; i < pmon->num_groups; ++i)
-		iommu_pm_grp_disable(iommu, i);
+		iommu->hw_ops->grp_disable(iommu, i);
 
 	/* Update cached copy of counters before turning off power */
 	iommu_pm_read_all_counters(pmon);
@@ -524,9 +321,9 @@
 
 	mutex_lock(&pmon->lock);
 
-	if (iommu_pm_is_hw_access_OK(pmon)) {
+	if (iommu->hw_ops->is_hw_access_OK(pmon)) {
 		iommu->ops->iommu_lock_acquire();
-		counter->value = iommu_pm_read_counter(counter);
+		counter->value = iommu->hw_ops->read_counter(counter);
 		iommu->ops->iommu_lock_release();
 	}
 	full_count = (unsigned long long) counter->value +
@@ -631,9 +428,9 @@
 		buf[wr_cnt-1] = '\0';
 		rv = kstrtoul(buf, 10, &cmd);
 		if (!rv && (cmd == 1)) {
-			if (iommu_pm_is_hw_access_OK(pmon)) {
+			if (iommu->hw_ops->is_hw_access_OK(pmon)) {
 				iommu->ops->iommu_lock_acquire();
-				iommu_pm_reset_counters(&pmon->iommu);
+				iommu->hw_ops->reset_counters(&pmon->iommu);
 				iommu->ops->iommu_lock_release();
 			}
 			iommu_pm_reset_counts(pmon);
@@ -761,7 +558,8 @@
 		(*abs_counter_no)++;
 		cnt_grp->counters[j].value = 0;
 		cnt_grp->counters[j].overflow_count = 0;
-		cnt_grp->counters[j].current_event_class = NO_EVENT_CLASS;
+		cnt_grp->counters[j].current_event_class =
+						MSM_IOMMU_PMU_NO_EVENT_CLASS;
 
 		snprintf(name, 20, "counter%u", j);
 
@@ -894,11 +692,13 @@
 	if (ret)
 		goto free_mem;
 
+	iommu->hw_ops->initialize_hw(pmon_entry);
+
 	if (iommu->evt_irq > 0) {
 		ret = request_threaded_irq(iommu->evt_irq, NULL,
-				iommu_pm_evt_ovfl_int_handler,
+				iommu->hw_ops->evt_ovfl_int_handler,
 				IRQF_ONESHOT | IRQF_SHARED,
-				"msm_iommu_nonsecure_irq", pmon_entry);
+				"msm_iommu_pmon_nonsecure_irq", pmon_entry);
 		if (ret) {
 			pr_err("Request IRQ %d failed with ret=%d\n",
 								iommu->evt_irq,
diff --git a/drivers/leds/leds-pm8xxx.c b/drivers/leds/leds-pm8xxx.c
index 61b36eb..c3a5564 100644
--- a/drivers/leds/leds-pm8xxx.c
+++ b/drivers/leds/leds-pm8xxx.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2010-2012, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2010-2013, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -53,6 +53,9 @@
 #define WLED_BOOST_CFG_REG		SSBI_REG_ADDR_WLED_CTRL(14)
 #define WLED_HIGH_POLE_CAP_REG		SSBI_REG_ADDR_WLED_CTRL(16)
 
+#define WLED_STRING_ONE			0	/* Rightmost string */
+#define WLED_STRING_TWO			1	/* Middle string */
+#define WLED_STRING_THREE		2	/* Leftmost string */
 #define WLED_STRINGS			0x03
 #define WLED_OVP_VAL_MASK		0x30
 #define WLED_OVP_VAL_BIT_SHFT		0x04
@@ -87,10 +90,6 @@
 #define TWO_WLED_STRINGS		2
 #define THREE_WLED_STRINGS		3
 
-#define WLED_CABC_ONE_STRING		0x01
-#define WLED_CABC_TWO_STRING		0x03
-#define WLED_CABC_THREE_STRING		0x07
-
 #define WLED_CABC_SHIFT			3
 
 #define SSBI_REG_ADDR_RGB_CNTL1		0x12D
@@ -247,7 +246,7 @@
 led_wled_set(struct pm8xxx_led_data *led, enum led_brightness value)
 {
 	int rc, duty;
-	u8 val, i, num_wled_strings;
+	u8 val, i;
 
 	if (value > WLED_MAX_LEVEL)
 		value = WLED_MAX_LEVEL;
@@ -272,36 +271,41 @@
 
 	duty = (WLED_MAX_DUTY_CYCLE * value) / WLED_MAX_LEVEL;
 
-	num_wled_strings = led->wled_cfg->num_strings;
-
 	/* program brightness control registers */
-	for (i = 0; i < num_wled_strings; i++) {
-		rc = pm8xxx_readb(led->dev->parent,
-				WLED_BRIGHTNESS_CNTL_REG1(i), &val);
-		if (rc) {
-			dev_err(led->dev->parent, "can't read wled brightnes ctrl"
-				" register1 rc=%d\n", rc);
-			return rc;
-		}
+	for (i = 0; i < WLED_STRINGS; i++) {
+		if (led->wled_cfg->strings & (1 << i)) {
+			rc = pm8xxx_readb(led->dev->parent,
+					WLED_BRIGHTNESS_CNTL_REG1(i), &val);
+			if (rc) {
+				dev_err(led->dev->parent,
+					"can't read wled brightnes ctrl"
+					" register1 rc=%d\n", rc);
+				return rc;
+			}
 
-		val = (val & ~WLED_BRIGHTNESS_MSB_MASK) |
-			(duty >> WLED_8_BIT_SHFT);
-		rc = pm8xxx_writeb(led->dev->parent,
-				WLED_BRIGHTNESS_CNTL_REG1(i), val);
-		if (rc) {
-			dev_err(led->dev->parent, "can't write wled brightness ctrl"
-				" register1 rc=%d\n", rc);
-			return rc;
-		}
+			val = (val & ~WLED_MAX_CURR_MASK) |
+				(duty >> WLED_8_BIT_SHFT);
 
-		val = duty & WLED_8_BIT_MASK;
-		rc = pm8xxx_writeb(led->dev->parent,
-				WLED_BRIGHTNESS_CNTL_REG2(i), val);
-		if (rc) {
-			dev_err(led->dev->parent, "can't write wled brightness ctrl"
-				" register2 rc=%d\n", rc);
-			return rc;
-		}
+			rc = pm8xxx_writeb(led->dev->parent,
+					WLED_BRIGHTNESS_CNTL_REG1(i), val);
+			if (rc) {
+				dev_err(led->dev->parent,
+					"can't write wled brightness ctrl"
+					" register1 rc=%d\n", rc);
+				return rc;
+			}
+
+			val = duty & WLED_8_BIT_MASK;
+			rc = pm8xxx_writeb(led->dev->parent,
+					WLED_BRIGHTNESS_CNTL_REG2(i), val);
+			if (rc) {
+				dev_err(led->dev->parent,
+					"can't write wled brightness ctrl"
+					" register2 rc=%d\n", rc);
+				return rc;
+			}
+		} else
+			continue;
 	}
 	rc = pm8xxx_readb(led->dev->parent, WLED_SYNC_REG, &val);
 	if (rc) {
@@ -564,9 +568,7 @@
 static int __devinit init_wled(struct pm8xxx_led_data *led)
 {
 	int rc, i;
-	u8 val, num_wled_strings;
-
-	num_wled_strings = led->wled_cfg->num_strings;
+	u8 val, string_max_current;
 
 	/* program over voltage protection threshold */
 	if (led->wled_cfg->ovp_val > WLED_OVP_37V) {
@@ -640,38 +642,61 @@
 	}
 
 	/* program activation delay and maximum current */
-	for (i = 0; i < num_wled_strings; i++) {
-		rc = pm8xxx_readb(led->dev->parent,
-				WLED_MAX_CURR_CFG_REG(i), &val);
-		if (rc) {
-			dev_err(led->dev->parent, "can't read wled max current"
-				" config register rc=%d\n", rc);
-			return rc;
-		}
+	for (i = 0; i < WLED_STRINGS; i++) {
+		if (led->wled_cfg->strings & (1 << i)) {
+			rc = pm8xxx_readb(led->dev->parent,
+					WLED_MAX_CURR_CFG_REG(i), &val);
+			if (rc) {
+				dev_err(led->dev->parent,
+					"can't read wled max current"
+					" config register rc=%d\n", rc);
+				return rc;
+			}
 
-		if ((led->wled_cfg->ctrl_delay_us % WLED_CTL_DLY_STEP) ||
-			(led->wled_cfg->ctrl_delay_us > WLED_CTL_DLY_MAX)) {
-			dev_err(led->dev->parent, "Invalid control delay\n");
-			return rc;
-		}
+			if ((led->wled_cfg->ctrl_delay_us % WLED_CTL_DLY_STEP)
+				|| (led->wled_cfg->ctrl_delay_us >
+					WLED_CTL_DLY_MAX)) {
+				dev_err(led->dev->parent,
+					"Invalid control delay\n");
+				return rc;
+			}
 
-		val = val / WLED_CTL_DLY_STEP;
-		val = (val & ~WLED_CTL_DLY_MASK) |
-			(led->wled_cfg->ctrl_delay_us << WLED_CTL_DLY_BIT_SHFT);
+			val = val / WLED_CTL_DLY_STEP;
+			val = (val & ~WLED_CTL_DLY_MASK) |
+				(led->wled_cfg->ctrl_delay_us <<
+					WLED_CTL_DLY_BIT_SHFT);
 
-		if ((led->max_current > WLED_MAX_CURR)) {
-			dev_err(led->dev->parent, "Invalid max current\n");
-			return -EINVAL;
-		}
+			if ((led->max_current > WLED_MAX_CURR)) {
+				dev_err(led->dev->parent,
+					"Invalid max current\n");
+				return -EINVAL;
+			}
+		if (led->wled_cfg->max_current_ind) {
+			switch (i) {
+			case WLED_STRING_ONE:
+				string_max_current = led->wled_cfg->max_one;
+				break;
+			case WLED_STRING_TWO:
+				string_max_current = led->wled_cfg->max_two;
+				break;
+			case WLED_STRING_THREE:
+				string_max_current = led->wled_cfg->max_three;
+				break;
+			default:
+				return -EINVAL;
+			}
+			val = (val & ~WLED_MAX_CURR_MASK) | string_max_current;
+		} else
+			val = (val & ~WLED_MAX_CURR_MASK) | led->max_current;
 
-		val = (val & ~WLED_MAX_CURR_MASK) | led->max_current;
-
-		rc = pm8xxx_writeb(led->dev->parent,
-				WLED_MAX_CURR_CFG_REG(i), val);
-		if (rc) {
-			dev_err(led->dev->parent, "can't write wled max current"
-				" config register rc=%d\n", rc);
-			return rc;
+			rc = pm8xxx_writeb(led->dev->parent,
+					WLED_MAX_CURR_CFG_REG(i), val);
+			if (rc) {
+				dev_err(led->dev->parent,
+					"can't write wled max current"
+					" config register rc=%d\n", rc);
+				return rc;
+			}
 		}
 	}
 
@@ -683,19 +708,7 @@
 			return rc;
 		}
 
-		switch (num_wled_strings) {
-		case ONE_WLED_STRING:
-			val |= (WLED_CABC_ONE_STRING << WLED_CABC_SHIFT);
-			break;
-		case TWO_WLED_STRINGS:
-			val |= (WLED_CABC_TWO_STRING << WLED_CABC_SHIFT);
-			break;
-		case THREE_WLED_STRINGS:
-			val |= (WLED_CABC_THREE_STRING << WLED_CABC_SHIFT);
-			break;
-		default:
-			break;
-		}
+		val |= (led->wled_cfg->strings << WLED_CABC_SHIFT);
 
 		rc = pm8xxx_writeb(led->dev->parent, WLED_SYNC_REG, val);
 		if (rc) {
diff --git a/drivers/media/dvb/dvb-core/dvb_demux.c b/drivers/media/dvb/dvb-core/dvb_demux.c
index dfb8d58..2a0cde9 100644
--- a/drivers/media/dvb/dvb-core/dvb_demux.c
+++ b/drivers/media/dvb/dvb-core/dvb_demux.c
@@ -514,6 +514,10 @@
 			((feed->pid != pid) && (feed->pid != 0x2000)))
 			continue;
 
+		if (feed->secure_mode.is_secured &&
+			!dvb_dmx_is_rec_feed(feed))
+			return 0;
+
 		if (feed->type == DMX_TYPE_TS) {
 			desired_space = 192; /* upper bound */
 			ts = &feed->feed.ts;
@@ -593,19 +597,23 @@
 		if (!feed->feed.ts.is_filtering)
 			break;
 		if (feed->ts_type & TS_PACKET) {
-			if (feed->ts_type & TS_PAYLOAD_ONLY)
-				dvb_dmx_swfilter_payload(feed, buf);
-			else
+			if (feed->ts_type & TS_PAYLOAD_ONLY) {
+				if (!feed->secure_mode.is_secured)
+					dvb_dmx_swfilter_payload(feed, buf);
+			} else {
 				dvb_dmx_swfilter_output_packet(feed,
 						buf, timestamp);
+			}
 		}
-		if (feed->ts_type & TS_DECODER)
+		if ((feed->ts_type & TS_DECODER) &&
+			!feed->secure_mode.is_secured)
 			if (feed->demux->write_to_decoder)
 				feed->demux->write_to_decoder(feed, buf, 188);
 		break;
 
 	case DMX_TYPE_SEC:
-		if (!feed->feed.sec.is_filtering)
+		if (!feed->feed.sec.is_filtering ||
+			feed->secure_mode.is_secured)
 			break;
 		if (dvb_dmx_swfilter_section_packet(feed, buf) < 0)
 			feed->feed.sec.seclen = feed->feed.sec.secbufp = 0;
@@ -1212,17 +1220,22 @@
 {
 	struct dvb_demux_feed *dvbdmxfeed = (struct dvb_demux_feed *)feed;
 	struct dvb_demux *dvbdmx = dvbdmxfeed->demux;
+	int ret = 0;
 
 	mutex_lock(&dvbdmx->mutex);
 
-	dvbdmxfeed->secure_mode = *secure_mode;
-
 	if ((dvbdmxfeed->state == DMX_STATE_GO) &&
-		dvbdmxfeed->demux->set_secure_mode)
-		dvbdmxfeed->demux->set_secure_mode(dvbdmxfeed, secure_mode);
+		dvbdmxfeed->demux->set_secure_mode) {
+		ret = dvbdmxfeed->demux->set_secure_mode(dvbdmxfeed,
+			secure_mode);
+		if (!ret)
+			dvbdmxfeed->secure_mode = *secure_mode;
+	} else {
+		dvbdmxfeed->secure_mode = *secure_mode;
+	}
 
 	mutex_unlock(&dvbdmx->mutex);
-	return 0;
+	return ret;
 }
 
 static int dmx_ts_set_indexing_params(
diff --git a/drivers/media/dvb/dvb-core/dvb_demux.h b/drivers/media/dvb/dvb-core/dvb_demux.h
index f5f6039..f3dc4b8 100644
--- a/drivers/media/dvb/dvb-core/dvb_demux.h
+++ b/drivers/media/dvb/dvb-core/dvb_demux.h
@@ -203,5 +203,85 @@
 void dvb_dmx_swfilter_packet(struct dvb_demux *demux, const u8 *buf,
 				const u8 timestamp[TIMESTAMP_LEN]);
 
+/**
+ * dvb_dmx_is_video_feed - Returns whether the given PES feed
+ * is a video feed.
+ *
+ * @feed: The feed to be checked.
+ *
+ * Return 1 if the feed is a video feed, 0 otherwise.
+ */
+static inline int dvb_dmx_is_video_feed(struct dvb_demux_feed *feed)
+{
+	if (feed->type != DMX_TYPE_TS)
+		return 0;
+
+	if (feed->ts_type & (~TS_DECODER))
+		return 0;
+
+	if ((feed->pes_type == DMX_TS_PES_VIDEO0) ||
+		(feed->pes_type == DMX_TS_PES_VIDEO1) ||
+		(feed->pes_type == DMX_TS_PES_VIDEO2) ||
+		(feed->pes_type == DMX_TS_PES_VIDEO3))
+		return 1;
+
+	return 0;
+}
+
+/**
+ * dvb_dmx_is_pcr_feed - Returns whether the given PES feed
+ * is a PCR feed.
+ *
+ * @feed: The feed to be checked.
+ *
+ * Return 1 if the feed is a PCR feed, 0 otherwise.
+ */
+static inline int dvb_dmx_is_pcr_feed(struct dvb_demux_feed *feed)
+{
+	if (feed->type != DMX_TYPE_TS)
+		return 0;
+
+	if (feed->ts_type & (~TS_DECODER))
+		return 0;
+
+	if ((feed->pes_type == DMX_TS_PES_PCR0) ||
+		(feed->pes_type == DMX_TS_PES_PCR1) ||
+		(feed->pes_type == DMX_TS_PES_PCR2) ||
+		(feed->pes_type == DMX_TS_PES_PCR3))
+		return 1;
+
+	return 0;
+}
+
+/**
+ * dvb_dmx_is_sec_feed - Returns whether this is a section feed
+ *
+ * @feed: The feed to be checked.
+ *
+ * Return 1 if the feed is a section feed, 0 otherwise.
+ */
+static inline int dvb_dmx_is_sec_feed(struct dvb_demux_feed *feed)
+{
+	return (feed->type == DMX_TYPE_SEC);
+}
+
+/**
+ * dvb_dmx_is_rec_feed - Returns whether this is a recording feed
+ *
+ * @feed: The feed to be checked.
+ *
+ * Return 1 if the feed is a recording feed, 0 otherwise.
+ */
+static inline int dvb_dmx_is_rec_feed(struct dvb_demux_feed *feed)
+{
+	if (feed->type != DMX_TYPE_TS)
+		return 0;
+
+	if (feed->ts_type & (TS_DECODER | TS_PAYLOAD_ONLY))
+		return 0;
+
+	return 1;
+}
+
 
 #endif /* _DVB_DEMUX_H_ */
diff --git a/drivers/media/platform/msm/camera_v2/Kconfig b/drivers/media/platform/msm/camera_v2/Kconfig
index 2bbdc22..e4777e6 100644
--- a/drivers/media/platform/msm/camera_v2/Kconfig
+++ b/drivers/media/platform/msm/camera_v2/Kconfig
@@ -82,6 +82,15 @@
 		snapshot config = 4000 * 3000 at 20 fps,
 		hfr video at 60, 90 and 120 fps.
 
+config IMX135
+	bool "Sensor IMX135 (BAYER 12M)"
+	depends on MSMB_CAMERA
+	---help---
+		Sony 12 MP Bayer Sensor with auto focus; uses
+		4 MIPI lanes, preview config = 2104 x 1560 at 49 fps,
+		snapshot config = 4208 x 3120 at 24 fps,
+		with video HDR support.
+
 config OV2720
 	bool "Sensor OV2720 (BAYER 2M)"
 	depends on MSMB_CAMERA
diff --git a/drivers/media/platform/msm/camera_v2/isp/msm_isp.h b/drivers/media/platform/msm/camera_v2/isp/msm_isp.h
index d714ffb..69d523c 100644
--- a/drivers/media/platform/msm/camera_v2/isp/msm_isp.h
+++ b/drivers/media/platform/msm/camera_v2/isp/msm_isp.h
@@ -77,8 +77,6 @@
 	void (*process_axi_irq) (struct vfe_device *vfe_dev,
 		uint32_t irq_status0, uint32_t irq_status1,
 		struct msm_isp_timestamp *ts);
-	void (*process_error_irq) (struct vfe_device *vfe_dev,
-		uint32_t irq_status0, uint32_t irq_status1);
 	void (*process_stats_irq) (struct vfe_device *vfe_dev,
 		uint32_t irq_status0, uint32_t irq_status1,
 		struct msm_isp_timestamp *ts);
diff --git a/drivers/media/platform/msm/camera_v2/isp/msm_isp32.c b/drivers/media/platform/msm/camera_v2/isp/msm_isp32.c
index 8f00e80..b981653 100644
--- a/drivers/media/platform/msm/camera_v2/isp/msm_isp32.c
+++ b/drivers/media/platform/msm/camera_v2/isp/msm_isp32.c
@@ -17,11 +17,13 @@
 #include "msm_isp32.h"
 #include "msm_isp_util.h"
 #include "msm_isp_axi_util.h"
+#include "msm_isp_stats_util.h"
 #include "msm_isp.h"
 #include "msm.h"
 #include "msm_camera_io_util.h"
 
-#define VFE32_BURST_LEN 4
+#define VFE32_BURST_LEN 3
+#define VFE32_UB_SIZE 1024
 #define VFE32_EQUAL_SLICE_UB 117
 #define VFE32_WM_BASE(idx) (0x4C + 0x18 * idx)
 #define VFE32_RDI_BASE(idx) (idx ? 0x734 + 0x4 * (idx - 1) : 0x06FC)
@@ -30,6 +32,13 @@
 #define VFE32_PING_PONG_BASE(wm, ping_pong) \
 	(VFE32_WM_BASE(wm) + 0x4 * (1 + (~(ping_pong >> wm) & 0x1)))
 
+#define VFE32_NUM_STATS_TYPE 7
+#define VFE32_STATS_PING_PONG_OFFSET 7
+#define VFE32_STATS_BASE(idx) (0xF4 + 0xC * idx)
+#define VFE32_STATS_PING_PONG_BASE(idx, ping_pong) \
+	(VFE32_STATS_BASE(idx) + 0x4 * \
+	(~(ping_pong >> (idx + VFE32_STATS_PING_PONG_OFFSET)) & 0x1))
+
 /*Temporary use fixed bus vectors in VFE */
 static struct msm_bus_vectors msm_vfe32_init_vectors[] = {
 	{
@@ -177,17 +186,16 @@
 		ISP_DBG("%s: PIX0 frame id: %lu\n", __func__,
 			vfe_dev->axi_data.src_info[VFE_PIX_0].frame_id);
 		msm_isp_sof_notify(vfe_dev, VFE_PIX_0, ts);
+		ISP_DBG("%s: SOF IRQ\n", __func__);
+		if (vfe_dev->axi_data.src_info[VFE_PIX_0].raw_stream_count > 0
+			&& vfe_dev->axi_data.src_info[VFE_PIX_0].
+			pix_stream_count == 0) {
+			msm_isp_sof_notify(vfe_dev, VFE_PIX_0, ts);
+			msm_isp_update_framedrop_reg(vfe_dev);
+		}
 	}
 }
 
-static void msm_vfe32_process_stats_irq(struct vfe_device *vfe_dev,
-	uint32_t irq_status0, uint32_t irq_status1,
-	struct msm_isp_timestamp *ts)
-{
-	/* todo: add stats specific code */
-	return;
-}
-
 static void msm_vfe32_process_violation_status(struct vfe_device *vfe_dev)
 {
 	uint32_t violation_status = vfe_dev->error_info.violation_status;
@@ -326,12 +334,22 @@
 	if (!(irq_status0 & 0x20) && !(irq_status1 & 0x1C000000))
 		return;
 
+	if (irq_status0 & BIT(5))
+		msm_isp_sof_notify(vfe_dev, VFE_PIX_0, ts);
+	if (irq_status1 & BIT(26))
+		msm_isp_sof_notify(vfe_dev, VFE_RAW_0, ts);
+	if (irq_status1 & BIT(27))
+		msm_isp_sof_notify(vfe_dev, VFE_RAW_1, ts);
+	if (irq_status1 & BIT(28))
+		msm_isp_sof_notify(vfe_dev, VFE_RAW_2, ts);
+
 	if (vfe_dev->axi_data.stream_update)
 		msm_isp_axi_stream_update(vfe_dev);
-
 	msm_isp_update_framedrop_reg(vfe_dev);
 	msm_isp_update_error_frame_count(vfe_dev);
 
+	vfe_dev->hw_info->vfe_ops.core_ops.
+		reg_update(vfe_dev);
 	return;
 }
 
@@ -462,6 +480,46 @@
 	}
 }
 
+static void msm_vfe32_cfg_io_format(struct vfe_device *vfe_dev,
+	struct msm_vfe_axi_stream_request_cmd *stream_req_cmd)
+{
+	int bpp, bpp_reg = 0;
+	uint32_t io_format_reg;
+	bpp = msm_isp_get_bit_per_pixel(stream_req_cmd->output_format);
+
+	switch (bpp) {
+	case 8:
+		bpp_reg = 0;
+		break;
+	case 10:
+		bpp_reg = 1 << 0;
+		break;
+	case 12:
+		bpp_reg = 1 << 1;
+		break;
+	}
+	io_format_reg = msm_camera_io_r(vfe_dev->vfe_base + 0x6F8);
+	switch (stream_req_cmd->stream_src) {
+	case CAMIF_RAW:
+		io_format_reg &= 0xFFFFCFFF;
+		io_format_reg |= bpp_reg << 12;
+		break;
+	case IDEAL_RAW:
+		io_format_reg &= 0xFFFFFFC8;
+		io_format_reg |= bpp_reg << 4;
+		break;
+	case PIX_ENCODER:
+	case PIX_VIEWFINDER:
+	case RDI_INTF_0:
+	case RDI_INTF_1:
+	case RDI_INTF_2:
+	default:
+		pr_err("%s: Invalid stream source\n", __func__);
+		return;
+	}
+	msm_camera_io_w(io_format_reg, vfe_dev->vfe_base + 0x6F8);
+}
+
 static void msm_vfe32_cfg_camif(struct vfe_device *vfe_dev,
 	struct msm_vfe_pix_cfg *pix_cfg)
 {
@@ -481,10 +539,10 @@
 					camif_cfg->pixels_per_line,
 					vfe_dev->vfe_base + 0x1EC);
 
-	msm_camera_io_w(ISP_SUB(first_pixel) << 16 | ISP_SUB(last_pixel),
+	msm_camera_io_w(first_pixel << 16 | last_pixel,
 					vfe_dev->vfe_base + 0x1F0);
 
-	msm_camera_io_w(ISP_SUB(first_line) << 16 | ISP_SUB(last_line),
+	msm_camera_io_w(first_line << 16 | last_line,
 					vfe_dev->vfe_base + 0x1F4);
 
 	val = msm_camera_io_r(vfe_dev->vfe_base + 0x6FC);
@@ -518,6 +576,9 @@
 	} else if (update_state == DISABLE_CAMIF) {
 		msm_camera_io_w_mb(0x0, vfe_dev->vfe_base + 0x1E0);
 		vfe_dev->axi_data.src_info[VFE_PIX_0].active = 0;
+	} else if (update_state == DISABLE_CAMIF_IMMEDIATELY) {
+		msm_camera_io_w_mb(0x2, vfe_dev->vfe_base + 0x1E0);
+		vfe_dev->axi_data.src_info[VFE_PIX_0].active = 0;
 	}
 }
 
@@ -723,7 +784,28 @@
 
 static int msm_vfe32_get_stats_idx(enum msm_isp_stats_type stats_type)
 {
-	return 0;
+	switch (stats_type) {
+	case MSM_ISP_STATS_AEC:
+	case MSM_ISP_STATS_BG:
+		return 0;
+	case MSM_ISP_STATS_AF:
+	case MSM_ISP_STATS_BF:
+		return 1;
+	case MSM_ISP_STATS_AWB:
+		return 2;
+	case MSM_ISP_STATS_RS:
+		return 3;
+	case MSM_ISP_STATS_CS:
+		return 4;
+	case MSM_ISP_STATS_IHIST:
+		return 5;
+	case MSM_ISP_STATS_SKIN:
+	case MSM_ISP_STATS_BHIST:
+		return 6;
+	default:
+		pr_err("%s: Invalid stats type\n", __func__);
+		return -EINVAL;
+	}
 }
 
 static void msm_vfe32_stats_cfg_comp_mask(struct vfe_device *vfe_dev)
@@ -734,60 +816,120 @@
 static void msm_vfe32_stats_cfg_wm_irq_mask(struct vfe_device *vfe_dev,
 	struct msm_vfe_stats_stream *stream_info)
 {
+	uint32_t irq_mask;
+	irq_mask = msm_camera_io_r(vfe_dev->vfe_base + 0x1C);
+	irq_mask |= BIT(STATS_IDX(stream_info->stream_handle) + 13);
+	msm_camera_io_w(irq_mask, vfe_dev->vfe_base + 0x1C);
 	return;
 }
 
 static void msm_vfe32_stats_clear_wm_irq_mask(struct vfe_device *vfe_dev,
 	struct msm_vfe_stats_stream *stream_info)
 {
+	uint32_t irq_mask;
+	irq_mask = msm_camera_io_r(vfe_dev->vfe_base + 0x1C);
+	irq_mask &= ~(BIT(STATS_IDX(stream_info->stream_handle) + 13));
+	msm_camera_io_w(irq_mask, vfe_dev->vfe_base + 0x1C);
 	return;
 }
 
 static void msm_vfe32_stats_cfg_wm_reg(struct vfe_device *vfe_dev,
 	struct msm_vfe_stats_stream *stream_info)
 {
+	/* Nothing to configure for VFE3.x */
 	return;
 }
 
 static void msm_vfe32_stats_clear_wm_reg(struct vfe_device *vfe_dev,
 	struct msm_vfe_stats_stream *stream_info)
 {
+	/* Nothing to configure for VFE3.x */
 	return;
 }
 
 static void msm_vfe32_stats_cfg_ub(struct vfe_device *vfe_dev)
 {
+	int i;
+	uint32_t ub_offset = VFE32_UB_SIZE;
+	uint32_t ub_size[VFE32_NUM_STATS_TYPE] = {
+		64, /*MSM_ISP_STATS_BG*/
+		64, /*MSM_ISP_STATS_BF*/
+		16, /*MSM_ISP_STATS_AWB*/
+		8,  /*MSM_ISP_STATS_RS*/
+		16, /*MSM_ISP_STATS_CS*/
+		16, /*MSM_ISP_STATS_IHIST*/
+		16, /*MSM_ISP_STATS_BHIST*/
+	};
+
+	for (i = 0; i < VFE32_NUM_STATS_TYPE; i++) {
+		ub_offset -= ub_size[i];
+		msm_camera_io_w(ub_offset << 16 | (ub_size[i] - 1),
+			vfe_dev->vfe_base + VFE32_STATS_BASE(i) + 0x8);
+	}
 	return;
 }
 
 static void msm_vfe32_stats_enable_module(struct vfe_device *vfe_dev,
 	uint32_t stats_mask, uint8_t enable)
 {
-	return;
+	int i;
+	uint32_t module_cfg, module_cfg_mask = 0;
+
+	for (i = 0; i < VFE32_NUM_STATS_TYPE; i++) {
+		if ((stats_mask >> i) & 0x1) {
+			switch (i) {
+			case 0:
+			case 1:
+			case 2:
+			case 3:
+			case 4:
+				module_cfg_mask |= 1 << (5 + i);
+				break;
+			case 5:
+				module_cfg_mask |= 1 << 16;
+				break;
+			case 6:
+				module_cfg_mask |= 1 << 19;
+				break;
+			default:
+				pr_err("%s: Invalid stats mask\n", __func__);
+				return;
+			}
+		}
+	}
+
+	module_cfg = msm_camera_io_r(vfe_dev->vfe_base + 0x10);
+	if (enable)
+		module_cfg |= module_cfg_mask;
+	else
+		module_cfg &= ~module_cfg_mask;
+	msm_camera_io_w(module_cfg, vfe_dev->vfe_base + 0x10);
 }
 
 static void msm_vfe32_stats_update_ping_pong_addr(struct vfe_device *vfe_dev,
 	struct msm_vfe_stats_stream *stream_info, uint32_t pingpong_status,
 	unsigned long paddr)
 {
-	return;
+	int stats_idx = STATS_IDX(stream_info->stream_handle);
+	msm_camera_io_w(paddr, vfe_dev->vfe_base +
+		VFE32_STATS_PING_PONG_BASE(stats_idx, pingpong_status));
 }
 
 static uint32_t msm_vfe32_stats_get_wm_mask(uint32_t irq_status0,
 	uint32_t irq_status1)
 {
-	return 0;
+	return (irq_status0 >> 13) & 0x7F;
 }
 
 static uint32_t msm_vfe32_stats_get_comp_mask(uint32_t irq_status0,
 	uint32_t irq_status1)
 {
-	return 0;
+	return (irq_status0 >> 24) & 0x1;
 }
 
 static uint32_t msm_vfe32_stats_get_frame_id(struct vfe_device *vfe_dev)
 {
-	return 0;
+	return vfe_dev->axi_data.src_info[VFE_PIX_0].frame_id;
 }
 
 static int msm_vfe32_get_platform_data(struct vfe_device *vfe_dev)
@@ -850,6 +992,18 @@
 	.num_rdi_master = 3,
 };
 
+static struct msm_vfe_stats_hardware_info msm_vfe32_stats_hw_info = {
+	.stats_capability_mask =
+		1 << MSM_ISP_STATS_AEC | 1 << MSM_ISP_STATS_BG |
+		1 << MSM_ISP_STATS_AF | 1 << MSM_ISP_STATS_BF |
+		1 << MSM_ISP_STATS_AWB | 1 << MSM_ISP_STATS_IHIST |
+		1 << MSM_ISP_STATS_RS | 1 << MSM_ISP_STATS_CS |
+		1 << MSM_ISP_STATS_SKIN | 1 << MSM_ISP_STATS_BHIST,
+	.stats_ping_pong_offset = VFE32_STATS_PING_PONG_OFFSET,
+	.num_stats_type = VFE32_NUM_STATS_TYPE,
+	.num_stats_comp_mask = 0,
+};
+
 static struct v4l2_subdev_core_ops msm_vfe32_subdev_core_ops = {
 	.ioctl = msm_isp_ioctl,
 	.subscribe_event = msm_isp_subscribe_event,
@@ -875,11 +1029,12 @@
 			.process_halt_irq = msm_vfe32_process_halt_irq,
 			.process_reg_update = msm_vfe32_process_reg_update,
 			.process_axi_irq = msm_isp_process_axi_irq,
-			.process_stats_irq = msm_vfe32_process_stats_irq,
+			.process_stats_irq = msm_isp_process_stats_irq,
 		},
 		.axi_ops = {
 			.reload_wm = msm_vfe32_axi_reload_wm,
 			.enable_wm = msm_vfe32_axi_enable_wm,
+			.cfg_io_format = msm_vfe32_cfg_io_format,
 			.cfg_comp_mask = msm_vfe32_axi_cfg_comp_mask,
 			.clear_comp_mask = msm_vfe32_axi_clear_comp_mask,
 			.cfg_wm_irq_mask = msm_vfe32_axi_cfg_wm_irq_mask,
@@ -930,6 +1085,7 @@
 	},
 	.dmi_reg_offset = 0x5A0,
 	.axi_hw_info = &msm_vfe32_axi_hw_info,
+	.stats_hw_info = &msm_vfe32_stats_hw_info,
 	.subdev_ops = &msm_vfe32_subdev_ops,
 	.subdev_internal_ops = &msm_vfe32_internal_ops,
 };
diff --git a/drivers/media/platform/msm/camera_v2/isp/msm_isp_axi_util.c b/drivers/media/platform/msm/camera_v2/isp/msm_isp_axi_util.c
index fa0bf18..f08644f 100644
--- a/drivers/media/platform/msm/camera_v2/isp/msm_isp_axi_util.c
+++ b/drivers/media/platform/msm/camera_v2/isp/msm_isp_axi_util.c
@@ -369,15 +369,10 @@
 	msm_isp_send_event(vfe_dev, ISP_EVENT_SOF, &sof_event);
 }
 
-void msm_isp_calculate_framedrop(
-	struct msm_vfe_axi_shared_data *axi_data,
-	struct msm_vfe_axi_stream_request_cmd *stream_cfg_cmd)
+uint32_t msm_isp_get_framedrop_period(
+	enum msm_vfe_frame_skip_pattern frame_skip_pattern)
 {
-	struct msm_vfe_axi_stream *stream_info =
-		&axi_data->stream_info[
-		(stream_cfg_cmd->axi_stream_handle & 0xFF)];
-	uint32_t framedrop_period = 1;
-	switch (stream_cfg_cmd->frame_skip_pattern) {
+	switch (frame_skip_pattern) {
 	case NO_SKIP:
 	case EVERY_2FRAME:
 	case EVERY_3FRAME:
@@ -386,18 +381,28 @@
 	case EVERY_6FRAME:
 	case EVERY_7FRAME:
 	case EVERY_8FRAME:
-		framedrop_period = stream_cfg_cmd->frame_skip_pattern + 1;
-		break;
+		return frame_skip_pattern + 1;
 	case EVERY_16FRAME:
-		framedrop_period = 16;
+		return 16;
 		break;
 	case EVERY_32FRAME:
-		framedrop_period = 32;
+		return 32;
 		break;
 	default:
-		framedrop_period = 1;
-		break;
+		return 1;
 	}
+	return 1;
+}
+
+void msm_isp_calculate_framedrop(
+	struct msm_vfe_axi_shared_data *axi_data,
+	struct msm_vfe_axi_stream_request_cmd *stream_cfg_cmd)
+{
+	struct msm_vfe_axi_stream *stream_info =
+		&axi_data->stream_info[
+		(stream_cfg_cmd->axi_stream_handle & 0xFF)];
+	uint32_t framedrop_period = msm_isp_get_framedrop_period(
+	   stream_cfg_cmd->frame_skip_pattern);
 
 	stream_info->framedrop_pattern = 0x1;
 	stream_info->framedrop_period = framedrop_period - 1;
@@ -911,8 +916,16 @@
 				stream_info->bufq_handle,
 				MSM_ISP_BUFFER_FLUSH_DIVERTED);
 		break;
-	case UPDATE_STREAM_FRAMEDROP_PATTERN:
+	case UPDATE_STREAM_FRAMEDROP_PATTERN: {
+		uint32_t framedrop_period =
+			msm_isp_get_framedrop_period(update_cmd->skip_pattern);
+		stream_info->runtime_init_frame_drop = 0;
+		stream_info->framedrop_pattern = 0x1;
+		stream_info->framedrop_period = framedrop_period - 1;
+		vfe_dev->hw_info->vfe_ops.axi_ops.
+			cfg_framedrop(vfe_dev, stream_info);
 		break;
+	}
 	default:
 		pr_err("%s: Invalid update type\n", __func__);
 		return -EINVAL;
diff --git a/drivers/media/platform/msm/camera_v2/ispif/msm_ispif.c b/drivers/media/platform/msm/camera_v2/ispif/msm_ispif.c
index 7d0f9cb..691edc3 100644
--- a/drivers/media/platform/msm/camera_v2/ispif/msm_ispif.c
+++ b/drivers/media/platform/msm/camera_v2/ispif/msm_ispif.c
@@ -94,6 +94,13 @@
 	if (data > 0x1) {
 		unsigned long jiffes = msecs_to_jiffies(500);
 		long lrc = 0;
+		unsigned long flags;
+
+		spin_lock_irqsave(&ispif->auto_complete_lock, flags);
+		ispif->wait_timeout = 0;
+		init_completion(&ispif->reset_complete);
+		spin_unlock_irqrestore(&ispif->auto_complete_lock, flags);
+
 		if (params->vfe_intf == VFE0)
 			msm_camera_io_w(data, ispif->base + ISPIF_RST_CMD_ADDR);
 		else
@@ -104,6 +111,11 @@
 		if (lrc < 0 || !lrc) {
 			pr_err("%s: wait timeout ret = %ld\n", __func__, lrc);
 			rc = -EIO;
+
+			spin_lock_irqsave(&ispif->auto_complete_lock, flags);
+			ispif->wait_timeout = 1;
+			spin_unlock_irqrestore(
+				&ispif->auto_complete_lock, flags);
 		}
 	}
 	return rc;
@@ -114,6 +126,12 @@
 	int rc = 0;
 	long lrc = 0;
 	unsigned long jiffes = msecs_to_jiffies(500);
+	unsigned long flags;
+
+	spin_lock_irqsave(&ispif->auto_complete_lock, flags);
+	ispif->wait_timeout = 0;
+	init_completion(&ispif->reset_complete);
+	spin_unlock_irqrestore(&ispif->auto_complete_lock, flags);
 
 	BUG_ON(!ispif);
 
@@ -125,14 +143,17 @@
 		msm_camera_io_w_mb(ISPIF_RST_CMD_1_MASK, ispif->base +
 			ISPIF_RST_CMD_1_ADDR);
 
-	CDBG("%s: Sending reset\n", __func__);
 	lrc = wait_for_completion_interruptible_timeout(
 		&ispif->reset_complete, jiffes);
+
 	if (lrc < 0 || !lrc) {
 		pr_err("%s: wait timeout ret = %ld\n", __func__, lrc);
 		rc = -EIO;
+
+		spin_lock_irqsave(&ispif->auto_complete_lock, flags);
+		ispif->wait_timeout = 1;
+		spin_unlock_irqrestore(&ispif->auto_complete_lock, flags);
 	}
-	CDBG("%s: reset returned\n", __func__);
 
 	return rc;
 }
@@ -571,8 +592,14 @@
 		ispif->base + ISPIF_IRQ_CLEAR_2_ADDR);
 
 	if (out[VFE0].ispifIrqStatus0 & ISPIF_IRQ_STATUS_MASK) {
-		if (out[VFE0].ispifIrqStatus0 & RESET_DONE_IRQ)
-			complete(&ispif->reset_complete);
+		if (out[VFE0].ispifIrqStatus0 & RESET_DONE_IRQ) {
+			unsigned long flags;
+			spin_lock_irqsave(&ispif->auto_complete_lock, flags);
+			if (ispif->wait_timeout == 0)
+				complete(&ispif->reset_complete);
+			spin_unlock_irqrestore(
+				&ispif->auto_complete_lock, flags);
+		}
 
 		if (out[VFE0].ispifIrqStatus0 & PIX_INTF_0_OVERFLOW_IRQ)
 			pr_err("%s: VFE0 pix0 overflow.\n", __func__);
@@ -709,8 +736,6 @@
 		goto error_irq;
 	}
 
-	init_completion(&ispif->reset_complete);
-
 	rc = msm_ispif_reset(ispif);
 	if (rc == 0) {
 		ispif->ispif_state = ISPIF_POWER_UP;
@@ -830,13 +855,8 @@
 	struct ispif_device *ispif = v4l2_get_subdevdata(sd);
 
 	mutex_lock(&ispif->mutex);
-	if (ispif->open_cnt > 0) {
-		CDBG("%s: dev already open\n", __func__);
-		goto end;
-	}
 	/* mem remap is done in init when the clock is on */
 	ispif->open_cnt++;
-end:
 	mutex_unlock(&ispif->mutex);
 	return 0;
 }
@@ -940,7 +960,8 @@
 	ispif->pdev = pdev;
 	ispif->ispif_state = ISPIF_POWER_DOWN;
 	ispif->open_cnt = 0;
-
+	spin_lock_init(&ispif->auto_complete_lock);
+	ispif->wait_timeout = 0;
 	return 0;
 
 error:
diff --git a/drivers/media/platform/msm/camera_v2/ispif/msm_ispif.h b/drivers/media/platform/msm/camera_v2/ispif/msm_ispif.h
index c4418c1..f8c3cce 100644
--- a/drivers/media/platform/msm/camera_v2/ispif/msm_ispif.h
+++ b/drivers/media/platform/msm/camera_v2/ispif/msm_ispif.h
@@ -48,6 +48,8 @@
 	struct mutex mutex;
 	uint8_t start_ack_pending;
 	struct completion reset_complete;
+	spinlock_t auto_complete_lock;
+	uint8_t wait_timeout;
 	uint32_t csid_version;
 	int enb_dump_reg;
 	uint32_t open_cnt;
diff --git a/drivers/media/platform/msm/camera_v2/msm.c b/drivers/media/platform/msm/camera_v2/msm.c
index 08a4566..6974cb4 100644
--- a/drivers/media/platform/msm/camera_v2/msm.c
+++ b/drivers/media/platform/msm/camera_v2/msm.c
@@ -461,9 +461,6 @@
 static inline int __msm_sd_close_session_streams(struct v4l2_subdev *sd,
 	struct msm_sd_close_ioctl *sd_close)
 {
-	v4l2_subdev_call(sd, core, ioctl,
-		MSM_SD_CLOSE_SESSION_AND_STREAM, &sd_close);
-
 	return 0;
 }
 
diff --git a/drivers/media/platform/msm/camera_v2/msm_buf_mgr/msm_generic_buf_mgr.c b/drivers/media/platform/msm/camera_v2/msm_buf_mgr/msm_generic_buf_mgr.c
index 9af6674..8a21512 100644
--- a/drivers/media/platform/msm/camera_v2/msm_buf_mgr/msm_generic_buf_mgr.c
+++ b/drivers/media/platform/msm/camera_v2/msm_buf_mgr/msm_generic_buf_mgr.c
@@ -35,7 +35,9 @@
 	}
 	new_entry->session_id = buf_info->session_id;
 	new_entry->stream_id = buf_info->stream_id;
+	mutex_lock(&buf_mngr_dev->buf_q_lock);
 	list_add_tail(&new_entry->entry, &buf_mngr_dev->buf_qhead);
+	mutex_unlock(&buf_mngr_dev->buf_q_lock);
 	buf_info->index = new_entry->vb2_buf->v4l2_buf.index;
 	return 0;
 }
@@ -45,6 +47,8 @@
 {
 	struct msm_get_bufs *bufs, *save;
 	int ret = -EINVAL;
+
+	mutex_lock(&buf_mngr_dev->buf_q_lock);
 	list_for_each_entry_safe(bufs, save, &buf_mngr_dev->buf_qhead, entry) {
 		if ((bufs->session_id == buf_info->session_id) &&
 			(bufs->stream_id == buf_info->stream_id) &&
@@ -60,6 +64,7 @@
 			break;
 		}
 	}
+	mutex_unlock(&buf_mngr_dev->buf_q_lock);
 	return ret;
 }
 
@@ -70,6 +75,7 @@
 	struct msm_get_bufs *bufs, *save;
 	int ret = -EINVAL;
 
+	mutex_lock(&buf_mngr_dev->buf_q_lock);
 	list_for_each_entry_safe(bufs, save, &buf_mngr_dev->buf_qhead, entry) {
 		if ((bufs->session_id == buf_info->session_id) &&
 			(bufs->stream_id == buf_info->stream_id) &&
@@ -81,6 +87,7 @@
 			break;
 		}
 	}
+	mutex_unlock(&buf_mngr_dev->buf_q_lock);
 	return ret;
 }
 
@@ -156,12 +163,14 @@
 		&msm_buf_mngr_dev->vb2_ops);
 
 	INIT_LIST_HEAD(&msm_buf_mngr_dev->buf_qhead);
+	mutex_init(&msm_buf_mngr_dev->buf_q_lock);
 end:
 	return rc;
 }
 
 static void __exit msm_buf_mngr_exit(void)
 {
+	mutex_destroy(&msm_buf_mngr_dev->buf_q_lock);
 	kfree(msm_buf_mngr_dev);
 }
 
diff --git a/drivers/media/platform/msm/camera_v2/msm_buf_mgr/msm_generic_buf_mgr.h b/drivers/media/platform/msm/camera_v2/msm_buf_mgr/msm_generic_buf_mgr.h
index 7e588cc..a2b3a7e 100644
--- a/drivers/media/platform/msm/camera_v2/msm_buf_mgr/msm_generic_buf_mgr.h
+++ b/drivers/media/platform/msm/camera_v2/msm_buf_mgr/msm_generic_buf_mgr.h
@@ -33,6 +33,7 @@
 
 struct msm_buf_mngr_device {
 	struct list_head buf_qhead;
+	struct mutex buf_q_lock;
 	struct msm_sd_subdev subdev;
 	struct msm_sd_req_vb2_q vb2_ops;
 };
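The generic buffer manager hunks above add buf_q_lock so that every list_add_tail() and list_for_each_entry_safe() walk over buf_qhead runs under the same mutex. A minimal sketch of that pattern, with hypothetical demo_* names, is:

/* Illustrative sketch only -- not part of the patch. */
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/types.h>

struct demo_buf {
	struct list_head entry;
	u32 session_id;
	u32 stream_id;
};

struct demo_buf_mgr {
	struct list_head qhead;
	struct mutex q_lock;	/* protects qhead */
};

static void demo_buf_mgr_init(struct demo_buf_mgr *mgr)
{
	INIT_LIST_HEAD(&mgr->qhead);
	mutex_init(&mgr->q_lock);
}

static void demo_buf_mgr_add(struct demo_buf_mgr *mgr, struct demo_buf *buf)
{
	/* Every mutation of the queue happens under q_lock */
	mutex_lock(&mgr->q_lock);
	list_add_tail(&buf->entry, &mgr->qhead);
	mutex_unlock(&mgr->q_lock);
}

static int demo_buf_mgr_put(struct demo_buf_mgr *mgr, u32 session, u32 stream)
{
	struct demo_buf *buf, *tmp;
	int ret = -EINVAL;

	/* Walks that may delete entries are serialized by the same lock */
	mutex_lock(&mgr->q_lock);
	list_for_each_entry_safe(buf, tmp, &mgr->qhead, entry) {
		if (buf->session_id == session && buf->stream_id == stream) {
			list_del_init(&buf->entry);
			kfree(buf);
			ret = 0;
			break;
		}
	}
	mutex_unlock(&mgr->q_lock);
	return ret;
}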
diff --git a/drivers/media/platform/msm/camera_v2/msm_vb2/msm_vb2.c b/drivers/media/platform/msm/camera_v2/msm_vb2/msm_vb2.c
index 6ea86ae..22131f8 100644
--- a/drivers/media/platform/msm/camera_v2/msm_vb2/msm_vb2.c
+++ b/drivers/media/platform/msm/camera_v2/msm_vb2/msm_vb2.c
@@ -105,7 +105,7 @@
 	unsigned long flags;
 
 	stream = msm_get_stream(session_id, stream_id);
-	if (!stream)
+	if (IS_ERR_OR_NULL(stream))
 		return NULL;
 
 	spin_lock_irqsave(&stream->stream_lock, flags);
@@ -163,7 +163,7 @@
 	int rc = 0;
 
 	stream = msm_get_stream(session_id, stream_id);
-	if (!stream)
+	if (IS_ERR_OR_NULL(stream))
 		return 0;
 	spin_lock_irqsave(&stream->stream_lock, flags);
 	if (vb) {
@@ -172,6 +172,7 @@
 		/* put buf before buf done */
 		if (msm_vb2->in_freeq) {
 			vb2_buffer_done(vb, VB2_BUF_STATE_DONE);
+			msm_vb2->in_freeq = 0;
 			rc = 0;
 		} else
 			rc = -EINVAL;
diff --git a/drivers/media/platform/msm/camera_v2/pproc/cpp/msm_cpp.c b/drivers/media/platform/msm/camera_v2/pproc/cpp/msm_cpp.c
index 9f0ad19..637bce3 100644
--- a/drivers/media/platform/msm/camera_v2/pproc/cpp/msm_cpp.c
+++ b/drivers/media/platform/msm/camera_v2/pproc/cpp/msm_cpp.c
@@ -553,8 +553,6 @@
 	struct msm_cpp_frame_info_t *new_frame =
 		kzalloc(sizeof(struct msm_cpp_frame_info_t), GFP_KERNEL);
 	uint32_t *cpp_frame_msg;
-	struct ion_handle *src_ion_handle = NULL;
-	struct ion_handle *dest_ion_handle = NULL;
 	unsigned long len;
 	unsigned long in_phyaddr, out_phyaddr;
 	uint16_t num_stripes = 0;
@@ -595,41 +593,41 @@
 	CPP_DBG("CPP in_fd: %d out_fd: %d\n", new_frame->src_fd,
 		new_frame->dst_fd);
 
-	src_ion_handle = ion_import_dma_buf(cpp_dev->client,
+	new_frame->src_ion_handle = ion_import_dma_buf(cpp_dev->client,
 		new_frame->src_fd);
-	if (IS_ERR_OR_NULL(src_ion_handle)) {
+	if (IS_ERR_OR_NULL(new_frame->src_ion_handle)) {
 		pr_err("ION import failed\n");
-		rc = PTR_ERR(src_ion_handle);
+		rc = PTR_ERR(new_frame->src_ion_handle);
 		goto ERROR2;
 	}
-	rc = ion_map_iommu(cpp_dev->client, src_ion_handle,
+
+	rc = ion_map_iommu(cpp_dev->client, new_frame->src_ion_handle,
 		cpp_dev->domain_num, 0, SZ_4K, 0,
 		(unsigned long *)&in_phyaddr, &len, 0, 0);
 	if (rc < 0) {
 		pr_err("ION import failed\n");
-		rc = PTR_ERR(src_ion_handle);
+		rc = PTR_ERR(new_frame->src_ion_handle);
 		goto ERROR3;
 	}
 
 	CPP_DBG("in phy addr: 0x%x len: %ld\n", (uint32_t) in_phyaddr, len);
-
-	dest_ion_handle = ion_import_dma_buf(cpp_dev->client,
+	new_frame->dest_ion_handle = ion_import_dma_buf(cpp_dev->client,
 		new_frame->dst_fd);
-	if (IS_ERR_OR_NULL(dest_ion_handle)) {
+	if (IS_ERR_OR_NULL(new_frame->dest_ion_handle)) {
 		pr_err("ION import failed\n");
-		rc = PTR_ERR(dest_ion_handle);
+		rc = PTR_ERR(new_frame->dest_ion_handle);
 		goto ERROR4;
 	}
-	rc = ion_map_iommu(cpp_dev->client, dest_ion_handle,
+
+	rc = ion_map_iommu(cpp_dev->client, new_frame->dest_ion_handle,
 		cpp_dev->domain_num, 0, SZ_4K, 0,
 		(unsigned long *)&out_phyaddr, &len, 0, 0);
 	if (rc < 0) {
-		rc = PTR_ERR(dest_ion_handle);
+		rc = PTR_ERR(new_frame->dest_ion_handle);
 		goto ERROR5;
 	}
 
 	CPP_DBG("out phy addr: 0x%x len: %ld\n", (uint32_t)out_phyaddr, len);
-
 	num_stripes = ((cpp_frame_msg[12] >> 20) & 0x3FF) +
 		((cpp_frame_msg[12] >> 10) & 0x3FF) +
 		(cpp_frame_msg[12] & 0x3FF);
@@ -667,15 +665,17 @@
 ERROR7:
 	kfree(frame_qcmd);
 ERROR6:
-	ion_unmap_iommu(cpp_dev->client, dest_ion_handle,
+	ion_unmap_iommu(cpp_dev->client, new_frame->dest_ion_handle,
 		cpp_dev->domain_num, 0);
 ERROR5:
-	ion_free(cpp_dev->client, dest_ion_handle);
+	ion_free(cpp_dev->client, new_frame->dest_ion_handle);
+	new_frame->dest_ion_handle = NULL;
 ERROR4:
-	ion_unmap_iommu(cpp_dev->client, src_ion_handle,
+	ion_unmap_iommu(cpp_dev->client, new_frame->src_ion_handle,
 		cpp_dev->domain_num, 0);
 ERROR3:
-	ion_free(cpp_dev->client, src_ion_handle);
+	ion_free(cpp_dev->client, new_frame->src_ion_handle);
+	new_frame->src_ion_handle = NULL;
 ERROR2:
 	kfree(cpp_frame_msg);
 ERROR1:
@@ -719,6 +719,24 @@
 					mutex_unlock(&cpp_dev->mutex);
 					return -EINVAL;
 		}
+		if (process_frame->dest_ion_handle) {
+			ion_unmap_iommu(cpp_dev->client,
+				process_frame->dest_ion_handle,
+				cpp_dev->domain_num, 0);
+			ion_free(cpp_dev->client,
+				process_frame->dest_ion_handle);
+			process_frame->dest_ion_handle = NULL;
+		}
+
+		if (process_frame->src_ion_handle) {
+			ion_unmap_iommu(cpp_dev->client,
+				process_frame->src_ion_handle,
+				cpp_dev->domain_num, 0);
+			ion_free(cpp_dev->client,
+				process_frame->src_ion_handle);
+			process_frame->src_ion_handle = NULL;
+		}
+
 		kfree(process_frame->cpp_cmd_msg);
 		kfree(process_frame);
 		kfree(event_qcmd);
diff --git a/drivers/media/platform/msm/camera_v2/sensor/Makefile b/drivers/media/platform/msm/camera_v2/sensor/Makefile
index f6011ba..6f941f7 100644
--- a/drivers/media/platform/msm/camera_v2/sensor/Makefile
+++ b/drivers/media/platform/msm/camera_v2/sensor/Makefile
@@ -6,5 +6,6 @@
 obj-$(CONFIG_MSMB_CAMERA) += cci/ io/ csiphy/ csid/ actuator/ flash/
 obj-$(CONFIG_MSM_CAMERA_SENSOR) += msm_sensor.o
 obj-$(CONFIG_S5K3L1YX) += s5k3l1yx.o
+obj-$(CONFIG_IMX135) += imx135.o
 obj-$(CONFIG_OV2720) += ov2720.o
 obj-$(CONFIG_MT9M114) += mt9m114.o
diff --git a/drivers/media/platform/msm/camera_v2/sensor/csid/msm_csid.c b/drivers/media/platform/msm/camera_v2/sensor/csid/msm_csid.c
index 2999a23..2c8c8b8 100644
--- a/drivers/media/platform/msm/camera_v2/sensor/csid/msm_csid.c
+++ b/drivers/media/platform/msm/camera_v2/sensor/csid/msm_csid.c
@@ -147,7 +147,6 @@
 {
 	uint32_t irq;
 	struct csid_device *csid_dev = data;
-	uint32_t val = 0;
 	void __iomem *csidbase;
 	csidbase = csid_dev->base;
 
@@ -164,12 +163,6 @@
 		pr_debug("%s CSID%d_IRQ_STATUS_ADDR = 0x%x\n",
 			__func__, csid_dev->pdev->id, irq);
 		irq_count++;
-		if (irq_count >= 5) {
-			msm_camera_io_w(0x7f010800 | val,
-				csidbase + CSID_IRQ_MASK_ADDR);
-			msm_camera_io_w(0x7f010800 | val,
-				csidbase + CSID_IRQ_CLEAR_CMD_ADDR);
-		}
 	}
 	msm_camera_io_w(irq, csid_dev->base + CSID_IRQ_CLEAR_CMD_ADDR);
 	return IRQ_HANDLED;
diff --git a/drivers/media/platform/msm/camera_v2/sensor/imx135.c b/drivers/media/platform/msm/camera_v2/sensor/imx135.c
new file mode 100644
index 0000000..c9476ee
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v2/sensor/imx135.c
@@ -0,0 +1,149 @@
+/* Copyright (c) 2013, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+#include "msm_sensor.h"
+#define IMX135_SENSOR_NAME "imx135"
+DEFINE_MSM_MUTEX(imx135_mut);
+
+static struct msm_sensor_ctrl_t imx135_s_ctrl;
+
+static struct msm_sensor_power_setting imx135_power_setting[] = {
+	{
+		.seq_type = SENSOR_VREG,
+		.seq_val = CAM_VDIG,
+		.config_val = 0,
+		.delay = 0,
+	},
+	{
+		.seq_type = SENSOR_VREG,
+		.seq_val = CAM_VANA,
+		.config_val = 0,
+		.delay = 0,
+	},
+	{
+		.seq_type = SENSOR_VREG,
+		.seq_val = CAM_VIO,
+		.config_val = 0,
+		.delay = 0,
+	},
+	{
+		.seq_type = SENSOR_GPIO,
+		.seq_val = SENSOR_GPIO_RESET,
+		.config_val = GPIO_OUT_LOW,
+		.delay = 1,
+	},
+	{
+		.seq_type = SENSOR_GPIO,
+		.seq_val = SENSOR_GPIO_RESET,
+		.config_val = GPIO_OUT_HIGH,
+		.delay = 30,
+	},
+	{
+		.seq_type = SENSOR_CLK,
+		.seq_val = SENSOR_CAM_MCLK,
+		.config_val = 0,
+		.delay = 1,
+	},
+	{
+		.seq_type = SENSOR_I2C_MUX,
+		.seq_val = 0,
+		.config_val = 0,
+		.delay = 0,
+	},
+};
+
+static struct v4l2_subdev_info imx135_subdev_info[] = {
+	{
+		.code = V4L2_MBUS_FMT_SBGGR10_1X10,
+		.colorspace = V4L2_COLORSPACE_JPEG,
+		.fmt = 1,
+		.order = 0,
+	},
+};
+
+static const struct i2c_device_id imx135_i2c_id[] = {
+	{IMX135_SENSOR_NAME, (kernel_ulong_t)&imx135_s_ctrl},
+	{ }
+};
+
+static struct i2c_driver imx135_i2c_driver = {
+	.id_table = imx135_i2c_id,
+	.probe  = msm_sensor_i2c_probe,
+	.driver = {
+		.name = IMX135_SENSOR_NAME,
+	},
+};
+
+static struct msm_camera_i2c_client imx135_sensor_i2c_client = {
+	.addr_type = MSM_CAMERA_I2C_WORD_ADDR,
+};
+
+static const struct of_device_id imx135_dt_match[] = {
+	{.compatible = "qcom,imx135", .data = &imx135_s_ctrl},
+	{}
+};
+
+MODULE_DEVICE_TABLE(of, imx135_dt_match);
+
+static struct platform_driver imx135_platform_driver = {
+	.driver = {
+		.name = "qcom,imx135",
+		.owner = THIS_MODULE,
+		.of_match_table = imx135_dt_match,
+	},
+};
+
+static int32_t imx135_platform_probe(struct platform_device *pdev)
+{
+	int32_t rc = 0;
+	const struct of_device_id *match;
+	match = of_match_device(imx135_dt_match, &pdev->dev);
+	rc = msm_sensor_platform_probe(pdev, match->data);
+	return rc;
+}
+
+static int __init imx135_init_module(void)
+{
+	int32_t rc = 0;
+	pr_info("%s:%d\n", __func__, __LINE__);
+	rc = platform_driver_probe(&imx135_platform_driver,
+		imx135_platform_probe);
+	if (!rc)
+		return rc;
+	pr_err("%s:%d rc %d\n", __func__, __LINE__, rc);
+	return i2c_add_driver(&imx135_i2c_driver);
+}
+
+static void __exit imx135_exit_module(void)
+{
+	pr_info("%s:%d\n", __func__, __LINE__);
+	if (imx135_s_ctrl.pdev) {
+		msm_sensor_free_sensor_data(&imx135_s_ctrl);
+		platform_driver_unregister(&imx135_platform_driver);
+	} else
+		i2c_del_driver(&imx135_i2c_driver);
+	return;
+}
+
+static struct msm_sensor_ctrl_t imx135_s_ctrl = {
+	.sensor_i2c_client = &imx135_sensor_i2c_client,
+	.power_setting_array.power_setting = imx135_power_setting,
+	.power_setting_array.size = ARRAY_SIZE(imx135_power_setting),
+	.msm_sensor_mutex = &imx135_mut,
+	.sensor_v4l2_subdev_info = imx135_subdev_info,
+	.sensor_v4l2_subdev_info_size = ARRAY_SIZE(imx135_subdev_info),
+};
+
+module_init(imx135_init_module);
+module_exit(imx135_exit_module);
+MODULE_DESCRIPTION("imx135");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/platform/msm/dvb/demux/mpq_dmx_plugin_common.c b/drivers/media/platform/msm/dvb/demux/mpq_dmx_plugin_common.c
index 63ec1cf..0641162 100644
--- a/drivers/media/platform/msm/dvb/demux/mpq_dmx_plugin_common.c
+++ b/drivers/media/platform/msm/dvb/demux/mpq_dmx_plugin_common.c
@@ -18,7 +18,7 @@
 #include "mpq_dmx_plugin_common.h"
 #include "mpq_sdmx.h"
 
-#define SDMX_MAJOR_VERSION_MATCH	(2)
+#define SDMX_MAJOR_VERSION_MATCH	(3)
 
 #define TS_PACKET_HEADER_LENGTH (4)
 
@@ -847,7 +847,7 @@
 	mpq_dmx_info.devices = NULL;
 	mpq_dmx_info.ion_client = NULL;
 
-	mpq_sdmx_check_app_loaded();
+	mpq_dmx_info.secure_demux_app_loaded = 0;
 
 	/*
 	 * TODO: the following should be set based on the decoder:
@@ -903,6 +903,8 @@
 
 		mutex_init(&mpq_demux->mutex);
 
+		mpq_demux->num_secure_feeds = 0;
+		mpq_demux->num_active_feeds = 0;
 		mpq_demux->sdmx_filter_count = 0;
 		mpq_demux->sdmx_session_handle = SDMX_INVALID_SESSION_HANDLE;
 
@@ -1230,7 +1232,7 @@
 		return -EINVAL;
 	}
 
-	if (mpq_dmx_is_video_feed(feed)) {
+	if (dvb_dmx_is_video_feed(feed)) {
 		struct mpq_video_feed_info *feed_data;
 		struct mpq_feed *mpq_feed;
 		struct mpq_streambuffer *stream_buffer;
@@ -1957,9 +1959,10 @@
 		}
 
 		mpq_sdmx_close_session(mpq_demux);
+		mpq_demux->num_secure_feeds--;
 	}
 
-	if (mpq_dmx_is_video_feed(feed)) {
+	if (dvb_dmx_is_video_feed(feed)) {
 		ret = mpq_dmx_terminate_video_feed(mpq_feed);
 		if (ret)
 			MPQ_DVB_ERR_PRINT(
@@ -1973,6 +1976,7 @@
 	}
 
 	mpq_sdmx_terminate_metadata_buffer(mpq_feed);
+	mpq_demux->num_active_feeds--;
 
 	mutex_unlock(&mpq_demux->mutex);
 
@@ -1982,7 +1986,7 @@
 
 int mpq_dmx_decoder_fullness_init(struct dvb_demux_feed *feed)
 {
-	if (mpq_dmx_is_video_feed(feed)) {
+	if (dvb_dmx_is_video_feed(feed)) {
 		struct mpq_feed *mpq_feed;
 		struct mpq_video_feed_info *feed_data;
 
@@ -2056,7 +2060,7 @@
 	struct mpq_feed *mpq_feed;
 	int ret = 0;
 
-	if (!mpq_dmx_is_video_feed(feed)) {
+	if (!dvb_dmx_is_video_feed(feed)) {
 		MPQ_DVB_DBG_PRINT("%s: Invalid feed type %d\n",
 			__func__,
 			feed->pes_type);
@@ -2139,7 +2143,7 @@
 
 int mpq_dmx_decoder_fullness_abort(struct dvb_demux_feed *feed)
 {
-	if (mpq_dmx_is_video_feed(feed)) {
+	if (dvb_dmx_is_video_feed(feed)) {
 		struct mpq_feed *mpq_feed;
 		struct mpq_video_feed_info *feed_data;
 		struct dvb_ringbuffer *video_buff;
@@ -3087,7 +3091,7 @@
 	struct mpq_streambuffer *video_buff;
 	struct mpq_feed *mpq_feed;
 
-	if (!mpq_dmx_is_video_feed(feed)) {
+	if (!dvb_dmx_is_video_feed(feed)) {
 		MPQ_DVB_ERR_PRINT(
 			"%s: Invalid feed type %d\n",
 			__func__,
@@ -3230,67 +3234,6 @@
 }
 EXPORT_SYMBOL(mpq_dmx_process_pcr_packet);
 
-int mpq_dmx_set_secure_mode(struct dvb_demux_feed *feed,
-	struct dmx_secure_mode *sec_mode)
-{
-	struct mpq_feed *mpq_feed;
-	struct mpq_demux *mpq_demux;
-	int ret;
-
-	if (!feed || !feed->priv || !sec_mode) {
-		MPQ_DVB_ERR_PRINT(
-			"%s: invalid parameters\n",
-			__func__);
-		return -EINVAL;
-	}
-
-	MPQ_DVB_DBG_PRINT("%s(%d, %d, %d)\n",
-		__func__, sec_mode->pid,
-		sec_mode->is_secured,
-		sec_mode->key_ladder_id);
-
-	mpq_feed = feed->priv;
-	mpq_demux = mpq_feed->mpq_demux;
-
-	mutex_lock(&mpq_demux->mutex);
-
-	/*
-	 * If secure demux is active, set the KL now,
-	 * otherwise it will be set when secure-demux is started
-	 * (when filtering starts).
-	 */
-	if (mpq_demux->sdmx_session_handle !=
-		SDMX_INVALID_SESSION_HANDLE) {
-		if (sec_mode->is_secured) {
-			MPQ_DVB_DBG_PRINT(
-				"%s: set key-ladder %d to PID %d\n",
-				__func__,
-				sec_mode->key_ladder_id,
-				sec_mode->pid);
-			ret = sdmx_set_kl_ind(mpq_demux->sdmx_session_handle,
-				sec_mode->pid, sec_mode->key_ladder_id);
-			if (ret) {
-				MPQ_DVB_ERR_PRINT(
-					"%s: FAILED to set keyladder, ret=%d\n",
-					__func__, ret);
-				ret = -EINVAL;
-			}
-		} else {
-			MPQ_DVB_DBG_PRINT("%s: setting non-secure mode\n",
-				__func__);
-			ret = 0;
-		}
-	} else {
-		MPQ_DVB_DBG_PRINT("%s: SDMX not started yet\n", __func__);
-		ret = 0;
-	}
-
-	mutex_unlock(&mpq_demux->mutex);
-
-	return ret;
-}
-EXPORT_SYMBOL(mpq_dmx_set_secure_mode);
-
 int mpq_sdmx_open_session(struct mpq_demux *mpq_demux)
 {
 	enum sdmx_status ret = SDMX_SUCCESS;
@@ -3409,7 +3352,7 @@
 
 	*buf_mode = SDMX_RING_BUF;
 
-	if (mpq_dmx_is_video_feed(feed->dvb_demux_feed)) {
+	if (dvb_dmx_is_video_feed(feed->dvb_demux_feed)) {
 		if (feed_data->buffer_desc.decoder_buffers_num > 1)
 			*buf_mode = SDMX_LINEAR_GROUP_BUF;
 		*num_buffers = feed_data->buffer_desc.decoder_buffers_num;
@@ -3429,8 +3372,8 @@
 		}
 	} else {
 		*num_buffers = 1;
-		if (mpq_dmx_is_sec_feed(dvbdmx_feed) ||
-			mpq_dmx_is_pcr_feed(dvbdmx_feed)) {
+		if (dvb_dmx_is_sec_feed(dvbdmx_feed) ||
+			dvb_dmx_is_pcr_feed(dvbdmx_feed)) {
 			buffer = &feed->sdmx_buf;
 			sdmx_buff = feed->sdmx_buf_handle;
 		} else {
@@ -3481,18 +3424,18 @@
 
 	feed = dvbdmx_feed->priv;
 
-	if (mpq_dmx_is_sec_feed(dvbdmx_feed)) {
+	if (dvb_dmx_is_sec_feed(dvbdmx_feed)) {
 		feed->filter_type = SDMX_SECTION_FILTER;
 		if (dvbdmx_feed->feed.sec.check_crc)
 			filter_flags |= SDMX_FILTER_FLAG_VERIFY_SECTION_CRC;
 		MPQ_DVB_DBG_PRINT("%s: SDMX_SECTION_FILTER\n", __func__);
-	} else if (mpq_dmx_is_pcr_feed(dvbdmx_feed)) {
+	} else if (dvb_dmx_is_pcr_feed(dvbdmx_feed)) {
 		feed->filter_type = SDMX_PCR_FILTER;
 		MPQ_DVB_DBG_PRINT("%s: SDMX_PCR_FILTER\n", __func__);
-	} else if (mpq_dmx_is_video_feed(dvbdmx_feed)) {
+	} else if (dvb_dmx_is_video_feed(dvbdmx_feed)) {
 		feed->filter_type = SDMX_SEPARATED_PES_FILTER;
 		MPQ_DVB_DBG_PRINT("%s: SDMX_SEPARATED_PES_FILTER\n", __func__);
-	} else if (mpq_dmx_is_rec_feed(dvbdmx_feed)) {
+	} else if (dvb_dmx_is_rec_feed(dvbdmx_feed)) {
 		feed->filter_type = SDMX_RAW_FILTER;
 		switch (dvbdmx_feed->tsp_out_format) {
 		case (DMX_TSP_FORMAT_188):
@@ -3546,7 +3489,7 @@
 		/* Meta-data initialization,
 		 * Recording filters do no need meta-data buffers.
 		 */
-		if (mpq_dmx_is_rec_feed(dvbdmx_feed)) {
+		if (dvb_dmx_is_rec_feed(dvbdmx_feed)) {
 			metadata_buff_desc.base_addr = 0;
 			metadata_buff_desc.size = 0;
 		} else {
@@ -3640,6 +3583,63 @@
 	return ret;
 }
 
+/**
+ * mpq_sdmx_init_feed - initialize secure demux related elements of mpq feed
+ *
+ * @mpq_demux: mpq_demux object
+ * @mpq_feed: mpq_feed object
+ *
+ * Note: the function assumes mpq_demux->mutex locking is done by caller.
+ */
+static int mpq_sdmx_init_feed(struct mpq_demux *mpq_demux,
+	struct mpq_feed *mpq_feed)
+{
+	int ret;
+
+	ret = mpq_sdmx_open_session(mpq_demux);
+	if (ret) {
+		MPQ_DVB_ERR_PRINT(
+			"%s: mpq_sdmx_open_session failed, ret=%d\n",
+			__func__, ret);
+
+		ret = -ENODEV;
+		goto init_sdmx_feed_failed;
+	}
+
+	/* PCR and sections have internal buffer for SDMX */
+	if (dvb_dmx_is_pcr_feed(mpq_feed->dvb_demux_feed))
+		ret = mpq_sdmx_alloc_data_buf(mpq_feed, SDMX_PCR_BUFFER_SIZE);
+	else if (dvb_dmx_is_sec_feed(mpq_feed->dvb_demux_feed))
+		ret = mpq_sdmx_alloc_data_buf(mpq_feed,
+			SDMX_SECTION_BUFFER_SIZE);
+	else
+		ret = 0;
+
+	if (ret) {
+		MPQ_DVB_ERR_PRINT("%s: init buffer failed, ret=%d\n",
+			__func__, ret);
+		goto init_sdmx_feed_failed_free_sdmx;
+	}
+
+	ret = mpq_sdmx_filter_setup(mpq_demux, mpq_feed->dvb_demux_feed);
+	if (ret) {
+		MPQ_DVB_ERR_PRINT(
+			"%s: mpq_sdmx_filter_setup failed, ret=%d\n",
+			__func__, ret);
+		goto init_sdmx_feed_failed_free_data_buff;
+	}
+
+	mpq_demux->num_secure_feeds++;
+	return 0;
+
+init_sdmx_feed_failed_free_data_buff:
+	mpq_sdmx_free_data_buf(mpq_feed);
+init_sdmx_feed_failed_free_sdmx:
+	mpq_sdmx_close_session(mpq_demux);
+init_sdmx_feed_failed:
+	return ret;
+}
+
 int mpq_dmx_init_mpq_feed(struct dvb_demux_feed *feed)
 {
 	int ret = 0;
@@ -3648,80 +3648,113 @@
 
 	mutex_lock(&mpq_demux->mutex);
 
-	if (mpq_dmx_is_video_feed(feed)) {
-		ret = mpq_dmx_init_video_feed(mpq_feed);
-
-		if (ret) {
-			MPQ_DVB_ERR_PRINT(
-				"%s: mpq_dmx_init_video_feed failed, ret=%d\n",
-				__func__, ret);
-			goto init_mpq_feed_failed;
-		}
-	}
-
 	mpq_feed->sdmx_buf_handle = NULL;
 	mpq_feed->metadata_buf_handle = NULL;
 	mpq_feed->sdmx_filter_handle = SDMX_INVALID_FILTER_HANDLE;
 
-	if (!mpq_sdmx_is_loaded()) {
-		/* nothing more to do */
-		mpq_demux->sdmx_session_handle = SDMX_INVALID_SESSION_HANDLE;
-		mutex_unlock(&mpq_demux->mutex);
-		return ret;
+	if (dvb_dmx_is_video_feed(feed)) {
+		ret = mpq_dmx_init_video_feed(mpq_feed);
+		if (ret) {
+			MPQ_DVB_ERR_PRINT(
+				"%s: mpq_dmx_init_video_feed failed, ret=%d\n",
+				__func__, ret);
+			goto init_mpq_feed_end;
+		}
 	}
 
-	/* Further initializations for secure demux */
-	ret = mpq_sdmx_open_session(mpq_demux);
+	/*
+	 * sdmx is not relevant for recording filters, which always use
+	 * regular filters (non-sdmx)
+	 */
+	if (!mpq_sdmx_is_loaded() || !feed->secure_mode.is_secured ||
+		dvb_dmx_is_rec_feed(feed)) {
+		if (!mpq_sdmx_is_loaded())
+			mpq_demux->sdmx_session_handle =
+				SDMX_INVALID_SESSION_HANDLE;
+		goto init_mpq_feed_end;
+	}
+
+	 /* Initialization of secure demux filters (PES/PCR/Video/Section) */
+	ret = mpq_sdmx_init_feed(mpq_demux, mpq_feed);
 	if (ret) {
 		MPQ_DVB_ERR_PRINT(
-			"%s: mpq_sdmx_open_session failed, ret=%d\n",
+			"%s: mpq_sdmx_init_feed failed, ret=%d\n",
 			__func__, ret);
-
-		ret = -ENODEV;
-		goto init_mpq_feed_failed_free_video;
+		if (dvb_dmx_is_video_feed(feed))
+			mpq_dmx_terminate_video_feed(mpq_feed);
 	}
 
-	/* PCR and sections have internal buffer for SDMX */
-	if (mpq_dmx_is_pcr_feed(feed))
-		ret = mpq_sdmx_alloc_data_buf(mpq_feed,
-			SDMX_PCR_BUFFER_SIZE);
-	else if (mpq_dmx_is_sec_feed(feed))
-		ret = mpq_sdmx_alloc_data_buf(mpq_feed,
-			SDMX_SECTION_BUFFER_SIZE);
-	else
-		ret = 0;
-
-	if (ret) {
-		MPQ_DVB_ERR_PRINT(
-			"%s: init buffer failed, ret=%d\n",
-			__func__, ret);
-		goto init_mpq_feed_failed_free_sdmx;
-	}
-
-	ret = mpq_sdmx_filter_setup(mpq_demux, feed);
-	if (ret) {
-		MPQ_DVB_ERR_PRINT(
-			"%s: mpq_sdmx_filter_setup failed, ret=%d\n",
-			__func__, ret);
-		goto init_mpq_feed_failed_free_data_buff;
-	}
-
-	mutex_unlock(&mpq_demux->mutex);
-	return 0;
-
-init_mpq_feed_failed_free_data_buff:
-	mpq_sdmx_free_data_buf(mpq_feed);
-init_mpq_feed_failed_free_sdmx:
-	mpq_sdmx_close_session(mpq_demux);
-init_mpq_feed_failed_free_video:
-	if (mpq_dmx_is_video_feed(feed))
-		mpq_dmx_terminate_video_feed(mpq_feed);
-init_mpq_feed_failed:
+init_mpq_feed_end:
+	if (!ret)
+		mpq_demux->num_active_feeds++;
 	mutex_unlock(&mpq_demux->mutex);
 	return ret;
 }
 EXPORT_SYMBOL(mpq_dmx_init_mpq_feed);
 
+/**
+ * Note: Called only when filter is in "GO" state - after feed has been started.
+ */
+int mpq_dmx_set_secure_mode(struct dvb_demux_feed *feed,
+	struct dmx_secure_mode *sec_mode)
+{
+	struct mpq_feed *mpq_feed;
+	struct mpq_demux *mpq_demux;
+	int ret = 0;
+
+	if (!feed || !feed->priv || !sec_mode) {
+		MPQ_DVB_ERR_PRINT(
+			"%s: invalid parameters\n",
+			__func__);
+		return -EINVAL;
+	}
+
+	MPQ_DVB_DBG_PRINT("%s(%d, %d, %d)\n",
+		__func__, sec_mode->pid,
+		sec_mode->is_secured,
+		sec_mode->key_ladder_id);
+
+	mpq_feed = feed->priv;
+	mpq_demux = mpq_feed->mpq_demux;
+
+	mutex_lock(&mpq_demux->mutex);
+
+	if (feed->secure_mode.is_secured != sec_mode->is_secured) {
+		/*
+		 * Switching between secure & non-secure mode is not allowed
+		 * while filter is running
+		 */
+		MPQ_DVB_ERR_PRINT(
+			"%s: Cannot switch between secure and non-secure mode while filter is running\n",
+			__func__);
+		mutex_unlock(&mpq_demux->mutex);
+		return -EPERM;
+	}
+
+	/*
+	 * The feed is already running in secure mode; this secure-mode
+	 * request only updates the key ladder id.
+	 */
+	if (feed->secure_mode.pid == sec_mode->pid && sec_mode->is_secured &&
+		feed->secure_mode.key_ladder_id != sec_mode->key_ladder_id &&
+		mpq_demux->sdmx_session_handle != SDMX_INVALID_SESSION_HANDLE) {
+		ret = sdmx_set_kl_ind(mpq_demux->sdmx_session_handle,
+			sec_mode->pid,
+			sec_mode->key_ladder_id);
+		if (ret) {
+			MPQ_DVB_ERR_PRINT(
+				"%s: FAILED to set key ladder, ret=%d\n",
+				__func__, ret);
+			ret = -ENODEV;
+		}
+	}
+
+	mutex_unlock(&mpq_demux->mutex);
+
+	return ret;
+}
+EXPORT_SYMBOL(mpq_dmx_set_secure_mode);
+
 static void mpq_sdmx_prepare_filter_status(struct mpq_demux *mpq_demux,
 	struct sdmx_filter_status *filter_sts,
 	struct mpq_feed *mpq_feed)
@@ -3742,11 +3775,11 @@
 		__func__, filter_sts->metadata_fill_count,
 		filter_sts->metadata_write_offset);
 
-	if (!mpq_dmx_is_video_feed(feed)) {
+	if (!dvb_dmx_is_video_feed(feed)) {
 		struct dvb_ringbuffer *buffer;
 
-		if (mpq_dmx_is_sec_feed(feed) ||
-			mpq_dmx_is_pcr_feed(feed)) {
+		if (dvb_dmx_is_sec_feed(feed) ||
+			dvb_dmx_is_pcr_feed(feed)) {
 			buffer = (struct dvb_ringbuffer *)
 				&mpq_feed->sdmx_buf;
 		} else {
@@ -4521,7 +4554,7 @@
 	int total_bytes_read = 0;
 	int limit = mpq_sdmx_proc_limit * mpq_demux->demux.ts_packet_size;
 
-	do {
+	while (fill_count >= mpq_demux->demux.ts_packet_size) {
 		todo = fill_count > limit ? limit : fill_count;
 		ret = mpq_sdmx_process_buffer(mpq_demux, input, todo,
 			read_offset);
@@ -4541,7 +4574,7 @@
 				__func__, ret);
 			break;
 		}
-	} while (fill_count > 0);
+	}
 
 	return total_bytes_read;
 }
@@ -4584,6 +4617,7 @@
 {
 	struct dvb_demux *dvb_demux;
 	struct mpq_demux *mpq_demux;
+	int ret = count;
 
 	if (demux == NULL)
 		return -EINVAL;
@@ -4591,25 +4625,50 @@
 	dvb_demux = demux->priv;
 	mpq_demux = dvb_demux->priv;
 
-	if (mpq_sdmx_is_loaded()) {
-		/* route through secure demux */
-		return mpq_sdmx_write(mpq_demux,
+	/* Route through secure demux - process secure feeds if any exist */
+	if (mpq_sdmx_is_loaded() && mpq_demux->sdmx_filter_count) {
+		ret = mpq_sdmx_write(mpq_demux,
 			demux->dvr_input.priv_handle,
 			buf,
 			count);
-	} else {
-		/* route through sw filter */
-		dvb_dmx_swfilter_format(dvb_demux, buf, count,
-			dvb_demux->tsp_format);
-		if (signal_pending(current))
-			return -EINTR;
-		return count;
+		if (ret < 0) {
+			MPQ_DVB_ERR_PRINT(
+				"%s: mpq_sdmx_write failed. ret = %d\n",
+				__func__, ret);
+			ret = count;
+		}
 	}
+
+	/*
+	 * Route through sw filter - process non-secure feeds if any exist.
+	 * The sw filter should process the same number of bytes the sdmx
+	 * process managed to consume, unless an sdmx error occurred, in
+	 * which case the whole buffer should be processed.
+	 */
+	if (mpq_demux->num_active_feeds > mpq_demux->num_secure_feeds) {
+		dvb_dmx_swfilter_format(dvb_demux, buf, ret,
+			dvb_demux->tsp_format);
+	}
+
+	if (signal_pending(current))
+		return -EINTR;
+
+	return ret;
 }
 EXPORT_SYMBOL(mpq_dmx_write);
 
 int mpq_sdmx_is_loaded(void)
 {
-	return mpq_bypass_sdmx ? 0 : mpq_dmx_info.secure_demux_app_loaded;
+	static int sdmx_load_checked;
+
+	if (mpq_bypass_sdmx)
+		return 0;
+
+	if (!sdmx_load_checked) {
+		mpq_sdmx_check_app_loaded();
+		sdmx_load_checked = 1;
+	}
+
+	return mpq_dmx_info.secure_demux_app_loaded;
 }
 EXPORT_SYMBOL(mpq_sdmx_is_loaded);
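The mpq_dmx_write() rework above routes DVR input down two paths: the buffer is first handed to secure demux when the sdmx app is loaded and secure filters exist, and the software demux then processes the same span of bytes whenever some active feeds are not secure, falling back to the whole buffer if sdmx reported an error. A condensed sketch of that control flow, using hypothetical demo_* stubs, is:

/* Illustrative sketch only -- not part of the patch. */
#include <linux/types.h>

struct demo_demux {
	int sdmx_filter_count;
	u32 num_active_feeds;
	u32 num_secure_feeds;
};

/* Stubs standing in for the real secure and software demux paths */
int demo_sdmx_loaded(void);
int demo_sdmx_write(struct demo_demux *d, const char *buf, size_t count);
void demo_swfilter(struct demo_demux *d, const char *buf, size_t count);

static int demo_dvr_write(struct demo_demux *d, const char *buf, size_t count)
{
	int processed = count;

	/* Secure path: only when the sdmx app is loaded and filters exist */
	if (demo_sdmx_loaded() && d->sdmx_filter_count) {
		processed = demo_sdmx_write(d, buf, count);
		if (processed < 0)
			processed = count; /* sdmx error: sw path sees it all */
	}

	/* SW path: only when some active feeds are not handled by sdmx */
	if (d->num_active_feeds > d->num_secure_feeds)
		demo_swfilter(d, buf, processed);

	return processed;
}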
diff --git a/drivers/media/platform/msm/dvb/demux/mpq_dmx_plugin_common.h b/drivers/media/platform/msm/dvb/demux/mpq_dmx_plugin_common.h
index 2c2420b..7affcc6 100644
--- a/drivers/media/platform/msm/dvb/demux/mpq_dmx_plugin_common.h
+++ b/drivers/media/platform/msm/dvb/demux/mpq_dmx_plugin_common.h
@@ -362,6 +362,9 @@
  * @ion_client: ION demux client used to allocate memory from ION.
  * @mutex: Lock used to protect against private feed data
  * @feeds: mpq common feed object pool
+ * @num_active_feeds: Number of active mpq feeds
+ * @num_secure_feeds: Number of secure feeds (feeds with an associated sdmx
+ * filter) currently allocated.
  * @filters_status: Array holding buffers status for each secure demux filter.
  * Used before each call to sdmx_process() to build up to date state.
  * @sdmx_session_handle: Secure demux open session handle
@@ -406,6 +409,8 @@
 	struct ion_client *ion_client;
 	struct mutex mutex;
 	struct mpq_feed feeds[MPQ_MAX_DMX_FILES];
+	u32 num_active_feeds;
+	u32 num_secure_feeds;
 	struct sdmx_filter_status filters_status[MPQ_MAX_DMX_FILES];
 	int sdmx_session_handle;
 	int sdmx_session_ref_count;
@@ -615,86 +620,6 @@
 int mpq_dmx_process_pcr_packet(struct dvb_demux_feed *feed, const u8 *buf);
 
 /**
- * mpq_dmx_is_video_feed - Returns whether the PES feed
- * is video one.
- *
- * @feed: The feed to be checked.
- *
- * Return     1 if feed is video feed, 0 otherwise.
- */
-static inline int mpq_dmx_is_video_feed(struct dvb_demux_feed *feed)
-{
-	if (feed->type != DMX_TYPE_TS)
-		return 0;
-
-	if (feed->ts_type & (~TS_DECODER))
-		return 0;
-
-	if ((feed->pes_type == DMX_TS_PES_VIDEO0) ||
-		(feed->pes_type == DMX_TS_PES_VIDEO1) ||
-		(feed->pes_type == DMX_TS_PES_VIDEO2) ||
-		(feed->pes_type == DMX_TS_PES_VIDEO3))
-		return 1;
-
-	return 0;
-}
-
-/**
- * mpq_dmx_is_pcr_feed - Returns whether the PES feed
- * is PCR one.
- *
- * @feed: The feed to be checked.
- *
- * Return     1 if feed is PCR feed, 0 otherwise.
- */
-static inline int mpq_dmx_is_pcr_feed(struct dvb_demux_feed *feed)
-{
-	if (feed->type != DMX_TYPE_TS)
-		return 0;
-
-	if (feed->ts_type & (~TS_DECODER))
-		return 0;
-
-	if ((feed->pes_type == DMX_TS_PES_PCR0) ||
-		(feed->pes_type == DMX_TS_PES_PCR1) ||
-		(feed->pes_type == DMX_TS_PES_PCR2) ||
-		(feed->pes_type == DMX_TS_PES_PCR3))
-		return 1;
-
-	return 0;
-}
-
-/**
- * mpq_dmx_is_sec_feed - Returns whether this is a section feed
- *
- * @feed: The feed to be checked.
- *
- * Return 1 if feed is a section feed, 0 otherwise.
- */
-static inline int mpq_dmx_is_sec_feed(struct dvb_demux_feed *feed)
-{
-	return (feed->type == DMX_TYPE_SEC);
-}
-
-/**
- * mpq_dmx_is_rec_feed - Returns whether this is a recording feed
- *
- * @feed: The feed to be checked.
- *
- * Return 1 if feed is recording feed, 0 otherwise.
- */
-static inline int mpq_dmx_is_rec_feed(struct dvb_demux_feed *feed)
-{
-	if (feed->type != DMX_TYPE_TS)
-		return 0;
-
-	if (feed->ts_type & (TS_DECODER | TS_PAYLOAD_ONLY))
-		return 0;
-
-	return 1;
-}
-
-/**
  * mpq_dmx_init_hw_statistics -
  * Extend dvb-demux debugfs with HW statistics.
  *
diff --git a/drivers/media/platform/msm/dvb/demux/mpq_dmx_plugin_tsif.c b/drivers/media/platform/msm/dvb/demux/mpq_dmx_plugin_tsif.c
index b29759c..3d48441 100644
--- a/drivers/media/platform/msm/dvb/demux/mpq_dmx_plugin_tsif.c
+++ b/drivers/media/platform/msm/dvb/demux/mpq_dmx_plugin_tsif.c
@@ -518,10 +518,10 @@
 				"%s: warnning - len larger than one packet\n",
 				__func__);
 
-	if (mpq_dmx_is_video_feed(feed))
+	if (dvb_dmx_is_video_feed(feed))
 		return mpq_dmx_process_video_packet(feed, buf);
 
-	if (mpq_dmx_is_pcr_feed(feed))
+	if (dvb_dmx_is_pcr_feed(feed))
 		return mpq_dmx_process_pcr_packet(feed, buf);
 
 	return 0;
diff --git a/drivers/media/platform/msm/dvb/demux/mpq_dmx_plugin_tspp_v1.c b/drivers/media/platform/msm/dvb/demux/mpq_dmx_plugin_tspp_v1.c
index 632e864..beb4cce 100644
--- a/drivers/media/platform/msm/dvb/demux/mpq_dmx_plugin_tspp_v1.c
+++ b/drivers/media/platform/msm/dvb/demux/mpq_dmx_plugin_tspp_v1.c
@@ -306,6 +306,30 @@
 }
 
 /**
+ * mpq_dmx_tspp_swfilter_desc - helper function
+ *
+ * Takes a tspp buffer descriptor and sends it to the SW filter for demuxing,
+ * one TS packet at a time.
+ *
+ * @mpq_demux - mpq demux object
+ * @tspp_data_desc - tspp buffer descriptor
+ */
+static inline void mpq_dmx_tspp_swfilter_desc(struct mpq_demux *mpq_demux,
+	const struct tspp_data_descriptor *tspp_data_desc)
+{
+	u32 notif_size;
+	int i;
+
+	notif_size = tspp_data_desc->size / TSPP_RAW_TTS_SIZE;
+	for (i = 0; i < notif_size; i++)
+		dvb_dmx_swfilter_packet(&mpq_demux->demux,
+			((u8 *)tspp_data_desc->virt_base) +
+			i * TSPP_RAW_TTS_SIZE,
+			((u8 *)tspp_data_desc->virt_base) +
+			i * TSPP_RAW_TTS_SIZE + TSPP_RAW_SIZE);
+}
+
+/**
  * Demux TS packets from TSPP by secure-demux.
  * The fucntion assumes the buffer is physically contiguous
  * and that TSPP descriptors are continuous in memory.
@@ -320,37 +344,46 @@
 	struct sdmx_buff_descr input;
 	size_t aggregate_len = 0;
 	size_t aggregate_count = 0;
-	phys_addr_t buff_start_addr;
-	phys_addr_t buff_current_addr = 0;
+	phys_addr_t buff_start_addr_phys;
+	phys_addr_t buff_current_addr_phys = 0;
+	u32 notif_size;
 	int i;
 
 	while ((tspp_data_desc = tspp_get_buffer(0, channel_id)) != NULL) {
 		if (0 == aggregate_count)
-			buff_current_addr = tspp_data_desc->phys_base;
+			buff_current_addr_phys = tspp_data_desc->phys_base;
+		notif_size = tspp_data_desc->size / TSPP_RAW_TTS_SIZE;
 		mpq_dmx_tspp_info.tsif[tsif].aggregate_ids[aggregate_count] =
 			tspp_data_desc->id;
 		aggregate_len += tspp_data_desc->size;
 		aggregate_count++;
-		mpq_demux->hw_notification_size +=
-			tspp_data_desc->size / TSPP_RAW_TTS_SIZE;
+		mpq_demux->hw_notification_size += notif_size;
+
+		/* Let SW filter process only if it might be relevant */
+		if (mpq_demux->num_active_feeds > mpq_demux->num_secure_feeds)
+			mpq_dmx_tspp_swfilter_desc(mpq_demux, tspp_data_desc);
+
 	}
 
 	if (!aggregate_count)
 		return;
 
-	buff_start_addr = mpq_dmx_tspp_info.tsif[tsif].ch_mem_heap_phys_base;
-	input.base_addr = (void *)buff_start_addr;
+	buff_start_addr_phys =
+		mpq_dmx_tspp_info.tsif[tsif].ch_mem_heap_phys_base;
+	input.base_addr = (void *)buff_start_addr_phys;
 	input.size = mpq_dmx_tspp_info.tsif[tsif].buffer_count *
 		TSPP_DESCRIPTOR_SIZE;
 
-	MPQ_DVB_DBG_PRINT(
-		"%s: Processing %d descriptors: %d bytes at start address 0x%x, read offset %d\n",
-		__func__, aggregate_count, aggregate_len,
-		(unsigned int)input.base_addr,
-		buff_current_addr - buff_start_addr);
+	if (mpq_sdmx_is_loaded() && mpq_demux->sdmx_filter_count) {
+		MPQ_DVB_DBG_PRINT(
+			"%s: SDMX Processing %d descriptors: %d bytes at start address 0x%x, read offset %d\n",
+			__func__, aggregate_count, aggregate_len,
+			(unsigned int)input.base_addr,
+			buff_current_addr_phys - buff_start_addr_phys);
 
-	mpq_sdmx_process(mpq_demux, &input, aggregate_len,
-		 buff_current_addr - buff_start_addr);
+		mpq_sdmx_process(mpq_demux, &input, aggregate_len,
+			buff_current_addr_phys - buff_start_addr_phys);
+	}
 
 	for (i = 0; i < aggregate_count; i++)
 		tspp_release_buffer(0, channel_id,
@@ -373,7 +406,6 @@
 	int channel_id;
 	int ref_count;
 	int ret;
-	int j;
 
 	do {
 		ret = wait_event_interruptible(
@@ -427,13 +459,8 @@
 					TSPP_RAW_TTS_SIZE;
 				mpq_demux->hw_notification_size += notif_size;
 
-				for (j = 0; j < notif_size; j++)
-					dvb_dmx_swfilter_packet(
-					 &mpq_demux->demux,
-					 ((u8 *)tspp_data_desc->virt_base) +
-					 j * TSPP_RAW_TTS_SIZE,
-					 ((u8 *)tspp_data_desc->virt_base) +
-					 j * TSPP_RAW_TTS_SIZE + TSPP_RAW_SIZE);
+				mpq_dmx_tspp_swfilter_desc(mpq_demux,
+					tspp_data_desc);
 				/*
 				 * Notify TSPP that the buffer
 				 * is no longer needed
@@ -1554,10 +1581,10 @@
 				"%s: warnning - len larger than one packet\n",
 				__func__);
 
-	if (mpq_dmx_is_video_feed(feed))
+	if (dvb_dmx_is_video_feed(feed))
 		return mpq_dmx_process_video_packet(feed, buf);
 
-	if (mpq_dmx_is_pcr_feed(feed))
+	if (dvb_dmx_is_pcr_feed(feed))
 		return mpq_dmx_process_pcr_packet(feed, buf);
 
 	return 0;
diff --git a/drivers/media/platform/msm/dvb/demux/mpq_sdmx.h b/drivers/media/platform/msm/dvb/demux/mpq_sdmx.h
index 0bd04e8..5b91436 100644
--- a/drivers/media/platform/msm/dvb/demux/mpq_sdmx.h
+++ b/drivers/media/platform/msm/dvb/demux/mpq_sdmx.h
@@ -43,6 +43,7 @@
 
 /* Filter-level status indicators */
 #define SDMX_FILTER_STATUS_EOS                    BIT(0)
+#define SDMX_FILTER_STATUS_WR_PTR_CHANGED         BIT(1)
 
 /* Filter-level flags */
 #define SDMX_FILTER_FLAG_VERIFY_SECTION_CRC	BIT(0)
@@ -91,10 +92,9 @@
 	SDMX_STATUS_SINGLE_PID_RAW_FILTER = -11,
 	SDMX_STATUS_INP_BUF_INVALID_PARAMS = -12,
 	SDMX_STATUS_INVALID_FILTER_CFG = -13,
-	SDMX_STATUS_ILLEGAL_WR_PTR_CHANGE = -14,
-	SDMX_STATUS_STALLED_IN_PULL_MODE = -15,
-	SDMX_STATUS_SECURITY_FAULT = -16,
-	SDMX_STATUS_NS_BUFFER_ERROR = -17,
+	SDMX_STATUS_STALLED_IN_PULL_MODE = -14,
+	SDMX_STATUS_SECURITY_FAULT = -15,
+	SDMX_STATUS_NS_BUFFER_ERROR = -16,
 };
 
 enum sdmx_filter {
@@ -177,9 +177,7 @@
 	/* Payload length */
 	u32 payload_length;
 
-	/* Total metadata length (including this header, plus optional
-	 * additional metadata.
-	 */
+	/* Number of meta data bytes immediately following this header */
 	u32 metadata_length;
 };
 
diff --git a/drivers/media/platform/msm/vcap/vcap_vp.c b/drivers/media/platform/msm/vcap/vcap_vp.c
index abc4e7e..aba7095 100644
--- a/drivers/media/platform/msm/vcap/vcap_vp.c
+++ b/drivers/media/platform/msm/vcap/vcap_vp.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -472,7 +472,7 @@
 	int rc;
 	struct vcap_dev *dev = c_data->dev;
 	struct ion_handle *handle = NULL;
-	unsigned long paddr, len, ionflag = 0;
+	unsigned long paddr, len;
 	void *vaddr;
 	size_t size = ((c_data->vp_out_fmt.width + 63) >> 6) *
 		((c_data->vp_out_fmt.height + 7) >> 3) * 16;
@@ -489,13 +489,6 @@
 		return -ENOMEM;
 	}
 
-	rc = ion_handle_get_flags(dev->ion_client, handle, &ionflag);
-	if (rc) {
-		pr_err("%s: get flags ion handle failed\n", __func__);
-		ion_free(dev->ion_client, handle);
-		return rc;
-	}
-
 	vaddr = ion_map_kernel(dev->ion_client, handle);
 	if (IS_ERR(vaddr)) {
 		pr_err("%s: Map motion buffer failed\n", __func__);
diff --git a/drivers/media/platform/msm/vidc/hfi_packetization.c b/drivers/media/platform/msm/vidc/hfi_packetization.c
index 1cabc3e..f23c0aa 100644
--- a/drivers/media/platform/msm/vidc/hfi_packetization.c
+++ b/drivers/media/platform/msm/vidc/hfi_packetization.c
@@ -57,6 +57,27 @@
 	return 0;
 }
 
+int create_pkt_cmd_sys_debug_config(
+	struct hfi_cmd_sys_set_property_packet *pkt,
+	u32 mode)
+{
+	struct hfi_debug_config *hfi;
+	if (!pkt)
+		return -EINVAL;
+
+	pkt->size = sizeof(struct hfi_cmd_sys_set_property_packet) +
+		sizeof(struct hfi_debug_config) + sizeof(u32);
+	pkt->packet_type = HFI_CMD_SYS_SET_PROPERTY;
+	pkt->num_properties = 1;
+	pkt->rg_property_data[0] = HFI_PROPERTY_SYS_DEBUG_CONFIG;
+	hfi = (struct hfi_debug_config *) &pkt->rg_property_data[1];
+	hfi->debug_config = mode;
+	hfi->debug_mode = HFI_DEBUG_MODE_QUEUE;
+	if (msm_fw_debug_mode <= HFI_DEBUG_MODE_QDSS)
+		hfi->debug_mode = msm_fw_debug_mode;
+	return 0;
+}
+
 int create_pkt_set_cmd_sys_resource(
 		struct hfi_cmd_sys_set_resource_packet *pkt,
 		struct vidc_resource_hdr *resource_hdr,
@@ -178,9 +199,18 @@
 	case HAL_BUFFER_INTERNAL_SCRATCH:
 		buffer = HFI_BUFFER_INTERNAL_SCRATCH;
 		break;
+	case HAL_BUFFER_INTERNAL_SCRATCH_1:
+		buffer = HFI_BUFFER_INTERNAL_SCRATCH_1;
+		break;
+	case HAL_BUFFER_INTERNAL_SCRATCH_2:
+		buffer = HFI_BUFFER_INTERNAL_SCRATCH_2;
+		break;
 	case HAL_BUFFER_INTERNAL_PERSIST:
 		buffer = HFI_BUFFER_INTERNAL_PERSIST;
 		break;
+	case HAL_BUFFER_INTERNAL_PERSIST_1:
+		buffer = HFI_BUFFER_INTERNAL_PERSIST_1;
+		break;
 	default:
 		dprintk(VIDC_ERR, "Invalid buffer :0x%x\n",
 				hal_buffer);
diff --git a/drivers/media/platform/msm/vidc/hfi_packetization.h b/drivers/media/platform/msm/vidc/hfi_packetization.h
index 8c61a40..df93906 100644
--- a/drivers/media/platform/msm/vidc/hfi_packetization.h
+++ b/drivers/media/platform/msm/vidc/hfi_packetization.h
@@ -31,6 +31,10 @@
 		struct vidc_resource_hdr *resource_hdr,
 		void *resource_value);
 
+int create_pkt_cmd_sys_debug_config(
+		struct hfi_cmd_sys_set_property_packet *pkt,
+		u32 mode);
+
 int create_pkt_cmd_sys_release_resource(
 		struct hfi_cmd_sys_release_resource_packet *pkt,
 		struct vidc_resource_hdr *resource_hdr);
diff --git a/drivers/media/platform/msm/vidc/hfi_response_handler.c b/drivers/media/platform/msm/vidc/hfi_response_handler.c
index 709eafc..be9458d 100644
--- a/drivers/media/platform/msm/vidc/hfi_response_handler.c
+++ b/drivers/media/platform/msm/vidc/hfi_response_handler.c
@@ -391,12 +391,30 @@
 			buffreq->buffer[6].buffer_type =
 				HAL_BUFFER_INTERNAL_SCRATCH;
 			break;
-		case HFI_BUFFER_INTERNAL_PERSIST:
+		case HFI_BUFFER_INTERNAL_SCRATCH_1:
 			memcpy(&buffreq->buffer[7], hfi_buf_req,
-			sizeof(struct hfi_buffer_requirements));
+				sizeof(struct hfi_buffer_requirements));
 			buffreq->buffer[7].buffer_type =
+				HAL_BUFFER_INTERNAL_SCRATCH_1;
+			break;
+		case HFI_BUFFER_INTERNAL_SCRATCH_2:
+			memcpy(&buffreq->buffer[8], hfi_buf_req,
+				sizeof(struct hfi_buffer_requirements));
+			buffreq->buffer[8].buffer_type =
+				HAL_BUFFER_INTERNAL_SCRATCH_2;
+			break;
+		case HFI_BUFFER_INTERNAL_PERSIST:
+			memcpy(&buffreq->buffer[9], hfi_buf_req,
+			sizeof(struct hfi_buffer_requirements));
+			buffreq->buffer[9].buffer_type =
 				HAL_BUFFER_INTERNAL_PERSIST;
 			break;
+		case HFI_BUFFER_INTERNAL_PERSIST_1:
+			memcpy(&buffreq->buffer[10], hfi_buf_req,
+				sizeof(struct hfi_buffer_requirements));
+			buffreq->buffer[10].buffer_type =
+				HAL_BUFFER_INTERNAL_PERSIST_1;
+			break;
 		default:
 			dprintk(VIDC_ERR,
 			"hal_process_sess_get_prop_buf_req: bad_buffer_type: %d",
diff --git a/drivers/media/platform/msm/vidc/msm_v4l2_vidc.c b/drivers/media/platform/msm/vidc/msm_v4l2_vidc.c
index 5d360bb..181b2b6 100644
--- a/drivers/media/platform/msm/vidc/msm_v4l2_vidc.c
+++ b/drivers/media/platform/msm/vidc/msm_v4l2_vidc.c
@@ -497,6 +497,14 @@
 					b->m.planes[i].reserved[0],
 					b->m.planes[i].reserved[1],
 					b->m.planes[i].length);
+			rc = msm_smem_cache_operations(v4l2_inst->mem_client,
+				binfo->handle[i], SMEM_CACHE_CLEAN);
+			if (rc)
+				dprintk(VIDC_WARN,
+					"CACHE Clean failed: %d, %d, %d\n",
+					b->m.planes[i].reserved[0],
+					b->m.planes[i].reserved[1],
+					b->m.planes[i].length);
 		}
 		b->m.planes[i].m.userptr = binfo->device_addr[i];
 	}
diff --git a/drivers/media/platform/msm/vidc/msm_venc.c b/drivers/media/platform/msm/vidc/msm_venc.c
index c7dfb97..cee48c7 100644
--- a/drivers/media/platform/msm/vidc/msm_venc.c
+++ b/drivers/media/platform/msm/vidc/msm_venc.c
@@ -88,27 +88,6 @@
 	"High Latency",
 };
 
-static const char *const mpeg_video_vidc_extradata[] = {
-	"Extradata none",
-	"Extradata MB Quantization",
-	"Extradata Interlace Video",
-	"Extradata VC1 Framedisp",
-	"Extradata VC1 Seqdisp",
-	"Extradata timestamp",
-	"Extradata S3D Frame Packing",
-	"Extradata Frame Rate",
-	"Extradata Panscan Window",
-	"Extradata Recovery point SEI",
-	"Extradata Closed Caption UD",
-	"Extradata AFD UD",
-	"Extradata Multislice info",
-	"Extradata number of concealed MB",
-	"Extradata metadata filler",
-	"Extradata input crop",
-	"Extradata digital zoom",
-	"Extradata aspect ratio",
-};
-
 enum msm_venc_ctrl_cluster {
 	MSM_VENC_CTRL_CLUSTER_QP = 1,
 	MSM_VENC_CTRL_CLUSTER_INTRA_PERIOD,
@@ -567,34 +546,16 @@
 		.cluster = 0,
 	},
 	{
-		.id = V4L2_CID_MPEG_VIDC_VIDEO_EXTRADATA,
-		.name = "Extradata Type",
-		.type = V4L2_CTRL_TYPE_MENU,
-		.minimum = V4L2_MPEG_VIDC_EXTRADATA_NONE,
-		.maximum = V4L2_MPEG_VIDC_INDEX_EXTRADATA_ASPECT_RATIO,
-		.default_value = V4L2_MPEG_VIDC_EXTRADATA_NONE,
-		.menu_skip_mask = ~(
-			(1 << V4L2_MPEG_VIDC_EXTRADATA_NONE) |
-			(1 << V4L2_MPEG_VIDC_EXTRADATA_MB_QUANTIZATION) |
-			(1 << V4L2_MPEG_VIDC_EXTRADATA_INTERLACE_VIDEO) |
-			(1 << V4L2_MPEG_VIDC_EXTRADATA_VC1_FRAMEDISP) |
-			(1 << V4L2_MPEG_VIDC_EXTRADATA_VC1_SEQDISP) |
-			(1 << V4L2_MPEG_VIDC_EXTRADATA_TIMESTAMP) |
-			(1 << V4L2_MPEG_VIDC_EXTRADATA_S3D_FRAME_PACKING) |
-			(1 << V4L2_MPEG_VIDC_EXTRADATA_FRAME_RATE) |
-			(1 << V4L2_MPEG_VIDC_EXTRADATA_PANSCAN_WINDOW) |
-			(1 << V4L2_MPEG_VIDC_EXTRADATA_RECOVERY_POINT_SEI) |
-			(1 << V4L2_MPEG_VIDC_EXTRADATA_CLOSED_CAPTION_UD) |
-			(1 << V4L2_MPEG_VIDC_EXTRADATA_AFD_UD) |
-			(1 << V4L2_MPEG_VIDC_EXTRADATA_MULTISLICE_INFO) |
-			(1 << V4L2_MPEG_VIDC_EXTRADATA_NUM_CONCEALED_MB) |
-			(1 << V4L2_MPEG_VIDC_EXTRADATA_METADATA_FILLER) |
-			(1 << V4L2_MPEG_VIDC_INDEX_EXTRADATA_INPUT_CROP) |
-			(1 << V4L2_MPEG_VIDC_INDEX_EXTRADATA_DIGITAL_ZOOM) |
-			(1 << V4L2_MPEG_VIDC_INDEX_EXTRADATA_ASPECT_RATIO)
-			),
-		.qmenu = mpeg_video_vidc_extradata,
+		.id = V4L2_CID_MPEG_VIDC_VIDEO_SECURE,
+		.name = "Secure mode",
+		.type = V4L2_CTRL_TYPE_BUTTON,
+		.minimum = 0,
+		.maximum = 0,
+		.default_value = 0,
 		.step = 0,
+		.menu_skip_mask = 0,
+		.qmenu = NULL,
+		.cluster = 0,
 	},
 };
 
@@ -630,7 +591,7 @@
 		.name = "Mpeg4",
 		.description = "Mpeg4 compressed format",
 		.fourcc = V4L2_PIX_FMT_MPEG4,
-		.num_planes = 2,
+		.num_planes = 1,
 		.get_frame_size = get_frame_size_compressed,
 		.type = CAPTURE_PORT,
 	},
@@ -638,7 +599,7 @@
 		.name = "H263",
 		.description = "H263 compressed format",
 		.fourcc = V4L2_PIX_FMT_H263,
-		.num_planes = 2,
+		.num_planes = 1,
 		.get_frame_size = get_frame_size_compressed,
 		.type = CAPTURE_PORT,
 	},
@@ -646,7 +607,7 @@
 		.name = "H264",
 		.description = "H264 compressed format",
 		.fourcc = V4L2_PIX_FMT_H264,
-		.num_planes = 2,
+		.num_planes = 1,
 		.get_frame_size = get_frame_size_compressed,
 		.type = CAPTURE_PORT,
 	},
@@ -654,7 +615,7 @@
 		.name = "VP8",
 		.description = "VP8 compressed format",
 		.fourcc = V4L2_PIX_FMT_VP8,
-		.num_planes = 2,
+		.num_planes = 1,
 		.get_frame_size = get_frame_size_compressed,
 		.type = CAPTURE_PORT,
 	},
@@ -701,11 +662,6 @@
 			sizes[i] = inst->fmts[CAPTURE_PORT]->get_frame_size(
 					i, inst->prop.height, inst->prop.width);
 		}
-		property_id = HAL_PARAM_BUFFER_COUNT_ACTUAL;
-		new_buf_count.buffer_type = HAL_BUFFER_OUTPUT;
-		new_buf_count.buffer_count_actual = *num_buffers;
-		rc = call_hfi_op(hdev, session_set_property, inst->session,
-					property_id, &new_buf_count);
 		break;
 	case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE:
 		rc = msm_comm_try_state(inst, MSM_VIDC_OPEN_DONE);
@@ -1490,15 +1446,10 @@
 		}
 		pdata = &enable;
 		break;
-	case V4L2_CID_MPEG_VIDC_VIDEO_EXTRADATA:
-	{
-		struct hal_extradata_enable extra;
-		property_id = HAL_PARAM_INDEX_EXTRADATA;
-		extra.index = msm_comm_get_hal_extradata_index(ctrl->val);
-		extra.enable = 1;
-		pdata = &extra;
+	case V4L2_CID_MPEG_VIDC_VIDEO_SECURE:
+		inst->mode = VIDC_SECURE;
+		dprintk(VIDC_INFO, "Setting secure mode to: %d\n", inst->mode);
 		break;
-	}
 	default:
 		rc = -ENOTSUPP;
 		break;
@@ -1815,7 +1766,6 @@
 	const struct msm_vidc_format *fmt = NULL;
 	int rc = 0;
 	int i;
-	int extra_idx = 0;
 	if (!inst || !f) {
 		dprintk(VIDC_ERR,
 			"Invalid input, inst = %p, format = %p\n", inst, f);
@@ -1836,16 +1786,6 @@
 			fmt->get_frame_size(i, inst->prop.height,
 					inst->prop.width);
 		}
-		extra_idx = EXTRADATA_IDX(fmt->num_planes);
-		if (extra_idx && (extra_idx < VIDEO_MAX_PLANES)) {
-			f->fmt.pix_mp.plane_fmt[extra_idx].sizeimage =
-				inst->buff_req.buffer
-				[HAL_BUFFER_EXTRADATA_OUTPUT].buffer_size;
-		}
-		for (i = 0; i < fmt->num_planes; ++i) {
-			inst->bufq[CAPTURE_PORT].vb2_bufq.plane_sizes[i] =
-			f->fmt.pix_mp.plane_fmt[i].sizeimage;
-		}
 	} else {
 		dprintk(VIDC_ERR,
 			"Buf type not recognized, type = %d\n",	f->type);
@@ -1885,7 +1825,6 @@
 	int i;
 	struct vidc_buffer_addr_info buffer_info;
 	struct hfi_device *hdev;
-	int extra_idx = 0;
 
 	if (!inst || !inst->core || !inst->core->device) {
 		dprintk(VIDC_ERR, "%s invalid parameters", __func__);
@@ -1898,41 +1837,24 @@
 	case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE:
 		break;
 	case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE:
-		if (b->length != inst->fmts[CAPTURE_PORT]->num_planes) {
-			dprintk(VIDC_ERR,
-				"Planes mismatch: needed: %d, allocated: %d\n",
-				inst->fmts[CAPTURE_PORT]->num_planes,
-				b->length);
-			rc = -EINVAL;
-			break;
-		}
-
-		for (i = 0; (i < b->length) && (i < VIDEO_MAX_PLANES); i++) {
-			dprintk(VIDC_DBG, "device_addr = 0x%lx, size = %d\n",
+		for (i = 0; i < b->length; i++) {
+			dprintk(VIDC_DBG,
+				"device_addr = %ld, size = %d\n",
 				b->m.planes[i].m.userptr,
 				b->m.planes[i].length);
-		}
-		buffer_info.buffer_size = b->m.planes[0].length;
-		buffer_info.buffer_type = HAL_BUFFER_OUTPUT;
-		buffer_info.num_buffers = 1;
-		buffer_info.align_device_addr =
-			b->m.planes[0].m.userptr;
-
-		extra_idx = EXTRADATA_IDX(b->length);
-		if (extra_idx && (extra_idx < VIDEO_MAX_PLANES)) {
-			buffer_info.extradata_addr =
-				b->m.planes[extra_idx].m.userptr;
-			dprintk(VIDC_DBG, "extradata: 0x%lx\n",
-					b->m.planes[extra_idx].m.userptr);
-			buffer_info.extradata_size =
-				b->m.planes[extra_idx].length;
-		}
-
-		rc = call_hfi_op(hdev, session_set_buffers,
+			buffer_info.buffer_size = b->m.planes[i].length;
+			buffer_info.buffer_type = HAL_BUFFER_OUTPUT;
+			buffer_info.num_buffers = 1;
+			buffer_info.align_device_addr =
+				b->m.planes[i].m.userptr;
+			buffer_info.extradata_size = 0;
+			buffer_info.extradata_addr = 0;
+			rc = call_hfi_op(hdev, session_set_buffers,
 				(void *)inst->session, &buffer_info);
-		if (rc)
-			dprintk(VIDC_ERR,
+			if (rc)
+				dprintk(VIDC_ERR,
 					"vidc_hal_session_set_buffers failed");
+		}
 		break;
 	default:
 		dprintk(VIDC_ERR,
@@ -1945,7 +1867,8 @@
 int msm_venc_release_buf(struct msm_vidc_inst *inst,
 					struct v4l2_buffer *b)
 {
-	int i, rc = 0, extra_idx = 0;
+	int rc = 0;
+	int i;
 	struct vidc_buffer_addr_info buffer_info;
 	struct hfi_device *hdev;
 
@@ -1966,36 +1889,24 @@
 	switch (b->type) {
 	case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE:
 		break;
-	case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE: {
-		if (b->length !=
-			inst->fmts[CAPTURE_PORT]->num_planes) {
-			dprintk(VIDC_ERR,
-					"Planes mismatch: needed: %d, to release: %d\n",
-					inst->fmts[CAPTURE_PORT]->num_planes,
-					b->length);
-			rc = -EINVAL;
-			break;
-		}
+	case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE:
 		for (i = 0; i < b->length; i++) {
 			dprintk(VIDC_DBG,
-				"Release device_addr = 0x%lx, size = %d, %d\n",
+				"Release device_addr = %ld, size = %d, %d\n",
 				b->m.planes[i].m.userptr,
 				b->m.planes[i].length, inst->state);
-		}
-		buffer_info.buffer_size = b->m.planes[0].length;
-		buffer_info.buffer_type = HAL_BUFFER_OUTPUT;
-		buffer_info.num_buffers = 1;
-		buffer_info.align_device_addr =
-			b->m.planes[0].m.userptr;
-		extra_idx = EXTRADATA_IDX(b->length);
-		if (extra_idx && (extra_idx < VIDEO_MAX_PLANES))
-			buffer_info.extradata_addr =
-			b->m.planes[extra_idx].m.userptr;
-		buffer_info.response_required = false;
-		rc = call_hfi_op(hdev, session_release_buffers,
+			buffer_info.buffer_size = b->m.planes[i].length;
+			buffer_info.buffer_type = HAL_BUFFER_OUTPUT;
+			buffer_info.num_buffers = 1;
+			buffer_info.align_device_addr =
+				 b->m.planes[i].m.userptr;
+			buffer_info.extradata_size = 0;
+			buffer_info.extradata_addr = 0;
+			buffer_info.response_required = false;
+			rc = call_hfi_op(hdev, session_release_buffers,
 				(void *)inst->session, &buffer_info);
-		if (rc)
-			dprintk(VIDC_ERR,
+			if (rc)
+				dprintk(VIDC_ERR,
 					"vidc_hal_session_release_buffers failed\n");
 		}
 		break;
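With num_planes for the compressed CAPTURE formats reduced from 2 to 1 above, the encoder no longer expects a separate extradata plane from userspace. A hedged userspace sketch of queueing such a single-plane CAPTURE buffer (fd, index, pointer and length are hypothetical placeholders) looks like:

/* Illustrative userspace sketch only -- not part of the patch. */
#include <string.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>

static int demo_queue_capture_buf(int fd, unsigned int index,
				  unsigned long userptr, unsigned int length)
{
	struct v4l2_plane plane;
	struct v4l2_buffer buf;

	memset(&plane, 0, sizeof(plane));
	plane.m.userptr = userptr;
	plane.length = length;

	memset(&buf, 0, sizeof(buf));
	buf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
	buf.memory = V4L2_MEMORY_USERPTR;
	buf.index = index;
	buf.m.planes = &plane;
	buf.length = 1;		/* single plane: bitstream only, no extradata */

	return ioctl(fd, VIDIOC_QBUF, &buf);
}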
diff --git a/drivers/media/platform/msm/vidc/msm_vidc_common.c b/drivers/media/platform/msm/vidc/msm_vidc_common.c
index 6a83334..8cce310 100644
--- a/drivers/media/platform/msm/vidc/msm_vidc_common.c
+++ b/drivers/media/platform/msm/vidc/msm_vidc_common.c
@@ -362,7 +362,9 @@
 	struct msm_vidc_cb_cmd_done *response = data;
 	struct msm_vidc_inst *inst;
 	struct v4l2_event dqevent;
+	struct v4l2_control control = {0};
 	struct msm_vidc_cb_event *event_notify;
+	int rc = 0;
 	if (response) {
 		inst = (struct msm_vidc_inst *)response->session_id;
 		dqevent.id = 0;
@@ -370,7 +372,16 @@
 		switch (event_notify->hal_event_type) {
 		case HAL_EVENT_SEQ_CHANGED_SUFFICIENT_RESOURCES:
 			dqevent.type =
-				V4L2_EVENT_SEQ_CHANGED_SUFFICIENT;
+				V4L2_EVENT_SEQ_CHANGED_INSUFFICIENT;
+			control.id =
+				V4L2_CID_MPEG_VIDC_VIDEO_CONTINUE_DATA_TRANSFER;
+			rc = v4l2_g_ctrl(&inst->ctrl_handler, &control);
+			if (rc)
+				dprintk(VIDC_WARN,
+					"Failed to get smooth streaming flag\n");
+			if (!rc && control.value == true)
+				dqevent.type =
+					V4L2_EVENT_SEQ_CHANGED_SUFFICIENT;
 			break;
 		case HAL_EVENT_SEQ_CHANGED_INSUFFICIENT_RESOURCES:
 			dqevent.type =
@@ -1432,6 +1443,198 @@
 	return flipped_state;
 }
 
+struct hal_buffer_requirements *get_buff_req_buffer(
+		struct msm_vidc_inst *inst, enum hal_buffer buffer_type)
+{
+	int i;
+	for (i = 0; i < HAL_BUFFER_MAX; i++) {
+		if (inst->buff_req.buffer[i].buffer_type == buffer_type)
+			return &inst->buff_req.buffer[i];
+	}
+	return NULL;
+}
+
+static int set_scratch_buffers(struct msm_vidc_inst *inst,
+	enum hal_buffer buffer_type)
+{
+	int rc = 0;
+	struct msm_smem *handle;
+	struct internal_buf *binfo;
+	struct vidc_buffer_addr_info buffer_info;
+	u32 smem_flags = 0;
+	int domain;
+	struct hal_buffer_requirements *scratch_buf;
+	int i;
+	struct hfi_device *hdev;
+
+	hdev = inst->core->device;
+
+	scratch_buf = get_buff_req_buffer(inst, buffer_type);
+	if (!scratch_buf) {
+		dprintk(VIDC_DBG,
+			"This scratch buffer is not required, buffer_type: %x\n",
+			buffer_type);
+		return 0;
+	}
+	dprintk(VIDC_DBG,
+		"scratch: num = %d, size = %d\n",
+		scratch_buf->buffer_count_actual,
+		scratch_buf->buffer_size);
+
+	if (inst->mode == VIDC_SECURE) {
+		domain = call_hfi_op(hdev, get_domain,
+				hdev->hfi_device_data, CP_MAP);
+		smem_flags |= SMEM_SECURE;
+	} else
+		domain = call_hfi_op(hdev, get_domain,
+				hdev->hfi_device_data, NS_MAP);
+
+	if (scratch_buf->buffer_size) {
+		for (i = 0; i < scratch_buf->buffer_count_actual;
+				i++) {
+			handle = msm_smem_alloc(inst->mem_client,
+				scratch_buf->buffer_size, 1, smem_flags,
+				domain, 0, 0);
+			if (!handle) {
+				dprintk(VIDC_ERR,
+					"Failed to allocate scratch memory\n");
+				rc = -ENOMEM;
+				goto err_no_mem;
+			}
+			rc = msm_smem_cache_operations(inst->mem_client,
+					handle, SMEM_CACHE_CLEAN);
+			if (rc) {
+				dprintk(VIDC_WARN,
+				"Failed to clean cache may cause undefined behavior\n");
+			}
+			binfo = kzalloc(sizeof(*binfo), GFP_KERNEL);
+			if (!binfo) {
+				dprintk(VIDC_ERR, "Out of memory\n");
+				rc = -ENOMEM;
+				goto fail_kzalloc;
+			}
+			binfo->handle = handle;
+			buffer_info.buffer_size = scratch_buf->buffer_size;
+			buffer_info.buffer_type = buffer_type;
+			binfo->buffer_type = buffer_type;
+			buffer_info.num_buffers = 1;
+			buffer_info.align_device_addr = handle->device_addr;
+			dprintk(VIDC_DBG, "Scratch buffer address: %x",
+					buffer_info.align_device_addr);
+			rc = call_hfi_op(hdev, session_set_buffers,
+				(void *) inst->session, &buffer_info);
+			if (rc) {
+				dprintk(VIDC_ERR,
+					"vidc_hal_session_set_buffers failed");
+				goto fail_set_buffers;
+			}
+			mutex_lock(&inst->lock);
+			list_add_tail(&binfo->list, &inst->internalbufs);
+			mutex_unlock(&inst->lock);
+		}
+	}
+	return rc;
+fail_set_buffers:
+	kfree(binfo);
+fail_kzalloc:
+	msm_smem_free(inst->mem_client, handle);
+err_no_mem:
+	return rc;
+}
+
+static int set_persist_buffers(struct msm_vidc_inst *inst,
+	enum hal_buffer buffer_type)
+{
+	int rc = 0;
+	struct msm_smem *handle;
+	struct internal_buf *binfo;
+	struct vidc_buffer_addr_info buffer_info;
+	u32 smem_flags = 0;
+	int domain;
+	struct hal_buffer_requirements *persist_buf;
+	int i;
+	struct hfi_device *hdev;
+
+	hdev = inst->core->device;
+
+	persist_buf = get_buff_req_buffer(inst, buffer_type);
+	if (!persist_buf) {
+		dprintk(VIDC_DBG,
+			"This persist buffer is not required, buffer_type: %x\n",
+			buffer_type);
+		return 0;
+	}
+
+	dprintk(VIDC_DBG,
+		"persist: num = %d, size = %d\n",
+		persist_buf->buffer_count_actual,
+		persist_buf->buffer_size);
+	if (!list_empty(&inst->persistbufs)) {
+		dprintk(VIDC_ERR,
+			"Persist buffers already allocated\n");
+		return rc;
+	}
+
+	if (inst->mode == VIDC_SECURE) {
+		domain = call_hfi_op(hdev, get_domain,
+				hdev->hfi_device_data, CP_MAP);
+		smem_flags |= SMEM_SECURE;
+	} else
+		domain = call_hfi_op(hdev, get_domain,
+				hdev->hfi_device_data, NS_MAP);
+
+	if (persist_buf->buffer_size) {
+		for (i = 0; i < persist_buf->buffer_count_actual; i++) {
+			handle = msm_smem_alloc(inst->mem_client,
+				persist_buf->buffer_size, 1, smem_flags,
+				domain, 0, 0);
+			if (!handle) {
+				dprintk(VIDC_ERR,
+					"Failed to allocate persist memory\n");
+				rc = -ENOMEM;
+				goto err_no_mem;
+			}
+			rc = msm_smem_cache_operations(inst->mem_client,
+					handle, SMEM_CACHE_CLEAN);
+			if (rc) {
+				dprintk(VIDC_WARN,
+				"Failed to clean cache may cause undefined behavior\n");
+			}
+			binfo = kzalloc(sizeof(*binfo), GFP_KERNEL);
+			if (!binfo) {
+				dprintk(VIDC_ERR, "Out of memory\n");
+				rc = -ENOMEM;
+				goto fail_kzalloc;
+			}
+			binfo->handle = handle;
+			buffer_info.buffer_size = persist_buf->buffer_size;
+			buffer_info.buffer_type = buffer_type;
+			binfo->buffer_type = buffer_type;
+			buffer_info.num_buffers = 1;
+			buffer_info.align_device_addr = handle->device_addr;
+			dprintk(VIDC_DBG, "Persist buffer address: %x",
+					buffer_info.align_device_addr);
+			rc = call_hfi_op(hdev, session_set_buffers,
+					(void *) inst->session, &buffer_info);
+			if (rc) {
+				dprintk(VIDC_ERR,
+					"vidc_hal_session_set_buffers failed");
+				goto fail_set_buffers;
+			}
+			mutex_lock(&inst->lock);
+			list_add_tail(&binfo->list, &inst->persistbufs);
+			mutex_unlock(&inst->lock);
+		}
+	}
+	return rc;
+fail_set_buffers:
+	kfree(binfo);
+fail_kzalloc:
+	msm_smem_free(inst->mem_client, handle);
+err_no_mem:
+	return rc;
+}
+
 int msm_comm_try_state(struct msm_vidc_inst *inst, int state)
 {
 	int rc = 0;
@@ -1712,6 +1915,7 @@
 	mutex_unlock(&inst->sync_lock);
 	return rc;
 }
+
 int msm_comm_release_scratch_buffers(struct msm_vidc_inst *inst)
 {
 	struct msm_smem *handle;
@@ -1744,7 +1948,7 @@
 					list);
 			handle = buf->handle;
 			buffer_info.buffer_size = handle->size;
-			buffer_info.buffer_type = HAL_BUFFER_INTERNAL_SCRATCH;
+			buffer_info.buffer_type = buf->buffer_type;
 			buffer_info.num_buffers = 1;
 			buffer_info.align_device_addr = handle->device_addr;
 			if (inst->state != MSM_VIDC_CORE_INVALID &&
@@ -1808,7 +2012,7 @@
 					list);
 			handle = buf->handle;
 			buffer_info.buffer_size = handle->size;
-			buffer_info.buffer_type = HAL_BUFFER_INTERNAL_PERSIST;
+			buffer_info.buffer_type = buf->buffer_type;
 			buffer_info.num_buffers = 1;
 			buffer_info.align_device_addr = handle->device_addr;
 			if (inst->state != MSM_VIDC_CORE_INVALID &&
@@ -1874,178 +2078,50 @@
 int msm_comm_set_scratch_buffers(struct msm_vidc_inst *inst)
 {
 	int rc = 0;
-	struct msm_smem *handle;
-	struct internal_buf *binfo;
-	struct vidc_buffer_addr_info buffer_info;
-	int domain;
-	unsigned long smem_flags = 0;
-	struct hal_buffer_requirements *scratch_buf;
-	int i;
-	struct hfi_device *hdev;
-
 	if (!inst || !inst->core || !inst->core->device) {
 		dprintk(VIDC_ERR, "%s invalid parameters", __func__);
 		return -EINVAL;
 	}
 
-	hdev = inst->core->device;
-
-	scratch_buf =
-		&inst->buff_req.buffer[HAL_BUFFER_INTERNAL_SCRATCH];
-	dprintk(VIDC_DBG,
-		"scratch: num = %d, size = %d\n",
-		scratch_buf->buffer_count_actual,
-		scratch_buf->buffer_size);
 	if (msm_comm_release_scratch_buffers(inst))
 		dprintk(VIDC_WARN, "Failed to release scratch buffers\n");
-	if (inst->mode == VIDC_SECURE) {
-		domain = call_hfi_op(hdev, get_domain,
-				hdev->hfi_device_data, CP_MAP);
-		smem_flags |= SMEM_SECURE;
-	} else
-		domain = call_hfi_op(hdev, get_domain,
-				hdev->hfi_device_data, NS_MAP);
 
-	if (scratch_buf->buffer_size) {
-		for (i = 0; i < scratch_buf->buffer_count_actual;
-				i++) {
-			handle = msm_smem_alloc(inst->mem_client,
-				scratch_buf->buffer_size, 1, smem_flags,
-				domain, 0, 0);
-			if (!handle) {
-				dprintk(VIDC_ERR,
-					"Failed to allocate scratch memory\n");
-				rc = -ENOMEM;
-				goto err_no_mem;
-			}
-			rc = msm_smem_cache_operations(inst->mem_client,
-					handle, SMEM_CACHE_CLEAN);
-			if (rc) {
-				dprintk(VIDC_WARN,
-				"Failed to clean cache may cause undefined behavior\n");
-			}
-			binfo = kzalloc(sizeof(*binfo), GFP_KERNEL);
-			if (!binfo) {
-				dprintk(VIDC_ERR, "Out of memory\n");
-				rc = -ENOMEM;
-				goto fail_kzalloc;
-			}
-			binfo->handle = handle;
-			buffer_info.buffer_size = scratch_buf->buffer_size;
-			buffer_info.buffer_type = HAL_BUFFER_INTERNAL_SCRATCH;
-			buffer_info.num_buffers = 1;
-			buffer_info.align_device_addr = handle->device_addr;
-			dprintk(VIDC_DBG, "Scratch buffer address: %x",
-					buffer_info.align_device_addr);
-			rc = call_hfi_op(hdev, session_set_buffers,
-				(void *) inst->session, &buffer_info);
-			if (rc) {
-				dprintk(VIDC_ERR,
-					"vidc_hal_session_set_buffers failed");
-				goto fail_set_buffers;
-			}
-			mutex_lock(&inst->lock);
-			list_add_tail(&binfo->list, &inst->internalbufs);
-			mutex_unlock(&inst->lock);
-		}
-	}
+	rc = set_scratch_buffers(inst, HAL_BUFFER_INTERNAL_SCRATCH);
+	if (rc)
+		goto error;
+
+	rc = set_scratch_buffers(inst, HAL_BUFFER_INTERNAL_SCRATCH_1);
+	if (rc)
+		goto error;
+
+	rc = set_scratch_buffers(inst, HAL_BUFFER_INTERNAL_SCRATCH_2);
+	if (rc)
+		goto error;
+
 	return rc;
-fail_set_buffers:
-	kfree(binfo);
-fail_kzalloc:
-	msm_smem_free(inst->mem_client, handle);
-err_no_mem:
+error:
+	msm_comm_release_scratch_buffers(inst);
 	return rc;
 }
 
 int msm_comm_set_persist_buffers(struct msm_vidc_inst *inst)
 {
 	int rc = 0;
-	struct msm_smem *handle;
-	struct internal_buf *binfo;
-	struct vidc_buffer_addr_info buffer_info;
-	unsigned long flags;
-	unsigned long smem_flags = 0;
-	int domain;
-	struct hal_buffer_requirements *persist_buf;
-	int i;
-	struct hfi_device *hdev;
-
 	if (!inst || !inst->core || !inst->core->device) {
 		dprintk(VIDC_ERR, "%s invalid parameters", __func__);
 		return -EINVAL;
 	}
 
-	hdev = inst->core->device;
+	rc = set_persist_buffers(inst, HAL_BUFFER_INTERNAL_PERSIST);
+	if (rc)
+		goto error;
 
-	persist_buf =
-		&inst->buff_req.buffer[HAL_BUFFER_INTERNAL_PERSIST];
-	dprintk(VIDC_DBG,
-		"persist: num = %d, size = %d\n",
-		persist_buf->buffer_count_actual,
-		persist_buf->buffer_size);
-	if (!list_empty(&inst->persistbufs)) {
-		dprintk(VIDC_ERR,
-			"Persist buffers already allocated\n");
-		return rc;
-	}
-
-	if (inst->mode == VIDC_SECURE) {
-		domain = call_hfi_op(hdev, get_domain,
-				hdev->hfi_device_data, CP_MAP);
-		flags |= SMEM_SECURE;
-	} else
-		domain = call_hfi_op(hdev, get_domain,
-				hdev->hfi_device_data, NS_MAP);
-
-	if (persist_buf->buffer_size) {
-		for (i = 0;	i <	persist_buf->buffer_count_actual; i++) {
-			handle = msm_smem_alloc(inst->mem_client,
-				persist_buf->buffer_size, 1, smem_flags,
-				domain, 0, 0);
-			if (!handle) {
-				dprintk(VIDC_ERR,
-					"Failed to allocate persist memory\n");
-				rc = -ENOMEM;
-				goto err_no_mem;
-			}
-			rc = msm_smem_cache_operations(inst->mem_client,
-					handle, SMEM_CACHE_CLEAN);
-			if (rc) {
-				dprintk(VIDC_WARN,
-				"Failed to clean cache may cause undefined behavior\n");
-			}
-			binfo = kzalloc(sizeof(*binfo), GFP_KERNEL);
-			if (!binfo) {
-				dprintk(VIDC_ERR, "Out of memory\n");
-				rc = -ENOMEM;
-				goto fail_kzalloc;
-			}
-			binfo->handle = handle;
-			buffer_info.buffer_size = persist_buf->buffer_size;
-			buffer_info.buffer_type = HAL_BUFFER_INTERNAL_PERSIST;
-			buffer_info.num_buffers = 1;
-			buffer_info.align_device_addr = handle->device_addr;
-			dprintk(VIDC_DBG, "Persist buffer address: %x",
-					buffer_info.align_device_addr);
-			rc = call_hfi_op(hdev, session_set_buffers,
-					(void *) inst->session, &buffer_info);
-			if (rc) {
-				dprintk(VIDC_ERR,
-					"vidc_hal_session_set_buffers failed");
-				goto fail_set_buffers;
-			}
-			mutex_lock(&inst->lock);
-			list_add_tail(&binfo->list, &inst->persistbufs);
-			mutex_unlock(&inst->lock);
-		}
-	}
+	rc = set_persist_buffers(inst, HAL_BUFFER_INTERNAL_PERSIST_1);
+	if (rc)
+		goto error;
 	return rc;
-fail_set_buffers:
-	kfree(binfo);
-fail_kzalloc:
-	msm_smem_free(inst->mem_client, handle);
-err_no_mem:
+error:
+	msm_comm_release_persist_buffers(inst);
 	return rc;
 }
 
diff --git a/drivers/media/platform/msm/vidc/msm_vidc_internal.h b/drivers/media/platform/msm/vidc/msm_vidc_internal.h
index c03a4c4..8238d42 100644
--- a/drivers/media/platform/msm/vidc/msm_vidc_internal.h
+++ b/drivers/media/platform/msm/vidc/msm_vidc_internal.h
@@ -92,6 +92,7 @@
 
 struct internal_buf {
 	struct list_head list;
+	enum hal_buffer buffer_type;
 	struct msm_smem *handle;
 };
 
diff --git a/drivers/media/platform/msm/vidc/venus_hfi.c b/drivers/media/platform/msm/vidc/venus_hfi.c
index 6d07165..af8b761 100644
--- a/drivers/media/platform/msm/vidc/venus_hfi.c
+++ b/drivers/media/platform/msm/vidc/venus_hfi.c
@@ -892,20 +892,16 @@
 
 static int venus_hfi_sys_set_debug(struct venus_hfi_device *device, int debug)
 {
-	struct hfi_debug_config *hfi;
 	u8 packet[VIDC_IFACEQ_VAR_SMALL_PKT_SIZE];
+	int rc = 0;
 	struct hfi_cmd_sys_set_property_packet *pkt =
 		(struct hfi_cmd_sys_set_property_packet *) &packet;
-	pkt->size = sizeof(struct hfi_cmd_sys_set_property_packet) +
-		sizeof(struct hfi_debug_config) + sizeof(u32);
-	pkt->packet_type = HFI_CMD_SYS_SET_PROPERTY;
-	pkt->num_properties = 1;
-	pkt->rg_property_data[0] = HFI_PROPERTY_SYS_DEBUG_CONFIG;
-	hfi = (struct hfi_debug_config *) &pkt->rg_property_data[1];
-	hfi->debug_config = debug;
-	hfi->debug_mode = HFI_DEBUG_MODE_QUEUE;
-	if (msm_fw_debug_mode <= HFI_DEBUG_MODE_QDSS)
-		hfi->debug_mode = msm_fw_debug_mode;
+	rc = create_pkt_cmd_sys_debug_config(pkt, debug);
+	if (rc) {
+		dprintk(VIDC_WARN,
+			"Debug mode setting to FW failed\n");
+		return -ENOTEMPTY;
+	}
 	if (venus_hfi_iface_cmdq_write(device, pkt))
 		return -ENOTEMPTY;
 	return 0;
diff --git a/drivers/media/platform/msm/vidc/vidc_hfi.h b/drivers/media/platform/msm/vidc/vidc_hfi.h
index 75594b3..8b3e7cb 100644
--- a/drivers/media/platform/msm/vidc/vidc_hfi.h
+++ b/drivers/media/platform/msm/vidc/vidc_hfi.h
@@ -53,6 +53,8 @@
 #define HFI_BUFFER_EXTRADATA_INPUT (HFI_OX_BASE + 0x2)
 #define HFI_BUFFER_EXTRADATA_OUTPUT (HFI_OX_BASE + 0x3)
 #define HFI_BUFFER_EXTRADATA_OUTPUT2 (HFI_OX_BASE + 0x4)
+#define HFI_BUFFER_INTERNAL_SCRATCH_1 (HFI_OX_BASE + 0x5)
+#define HFI_BUFFER_INTERNAL_SCRATCH_2 (HFI_OX_BASE + 0x6)
 
 #define HFI_BUFFER_MODE_STATIC (HFI_OX_BASE + 0x1)
 #define HFI_BUFFER_MODE_RING (HFI_OX_BASE + 0x2)
diff --git a/drivers/media/platform/msm/vidc/vidc_hfi_api.h b/drivers/media/platform/msm/vidc/vidc_hfi_api.h
index d06ea51..a057303 100644
--- a/drivers/media/platform/msm/vidc/vidc_hfi_api.h
+++ b/drivers/media/platform/msm/vidc/vidc_hfi_api.h
@@ -383,7 +383,10 @@
 	HAL_BUFFER_EXTRADATA_OUTPUT,
 	HAL_BUFFER_EXTRADATA_OUTPUT2,
 	HAL_BUFFER_INTERNAL_SCRATCH,
+	HAL_BUFFER_INTERNAL_SCRATCH_1,
+	HAL_BUFFER_INTERNAL_SCRATCH_2,
 	HAL_BUFFER_INTERNAL_PERSIST,
+	HAL_BUFFER_INTERNAL_PERSIST_1,
 	HAL_BUFFER_MAX
 };
 
diff --git a/drivers/media/platform/msm/vidc/vidc_hfi_helper.h b/drivers/media/platform/msm/vidc/vidc_hfi_helper.h
index 37c051e..01c5e0b 100644
--- a/drivers/media/platform/msm/vidc/vidc_hfi_helper.h
+++ b/drivers/media/platform/msm/vidc/vidc_hfi_helper.h
@@ -179,6 +179,7 @@
 #define HFI_BUFFER_OUTPUT				(HFI_COMMON_BASE + 0x2)
 #define HFI_BUFFER_OUTPUT2				(HFI_COMMON_BASE + 0x3)
 #define HFI_BUFFER_INTERNAL_PERSIST		(HFI_COMMON_BASE + 0x4)
+#define HFI_BUFFER_INTERNAL_PERSIST_1		(HFI_COMMON_BASE + 0x5)
 
 struct hfi_buffer_info {
 	u32 buffer_addr;
diff --git a/drivers/media/platform/msm/wfd/enc-subdev.h b/drivers/media/platform/msm/wfd/enc-subdev.h
index 93c0079..8bfb884 100644
--- a/drivers/media/platform/msm/wfd/enc-subdev.h
+++ b/drivers/media/platform/msm/wfd/enc-subdev.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
@@ -76,6 +76,8 @@
 			(a->offset == b->offset);
 	else if (a->kvaddr || b->kvaddr)
 		return a->kvaddr == b->kvaddr;
+	else if (a->paddr || b->paddr)
+		return a->paddr == b->paddr;
 	else
 		return false;
 }
@@ -107,6 +109,7 @@
 #define ENC_MMAP _IOWR('V', 25, struct mem_region_map *)
 #define ENC_MUNMAP _IOWR('V', 26, struct mem_region_map *)
 #define SET_FRAMERATE_MODE _IO('V', 27)
+#define ENC_SECURE _IO('V', 28)
 
 extern int venc_init(struct v4l2_subdev *sd, u32 val);
 extern int venc_load_fw(struct v4l2_subdev *sd);
diff --git a/drivers/media/platform/msm/wfd/enc-venus-subdev.c b/drivers/media/platform/msm/wfd/enc-venus-subdev.c
index 73a3d8e..b41ece6 100644
--- a/drivers/media/platform/msm/wfd/enc-venus-subdev.c
+++ b/drivers/media/platform/msm/wfd/enc-venus-subdev.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
@@ -252,12 +252,23 @@
 	return msm_vidc_s_ctrl(inst->vidc_context, &ctrl);
 }
 
+static long get_iommu_domain(struct venc_inst *inst)
+{
+	struct msm_vidc_iommu_info maps[MAX_MAP];
+	int rc = msm_vidc_get_iommu_maps(inst->vidc_context, maps);
+	if (rc) {
+		WFD_MSG_ERR("Failed to retrieve domain mappings\n");
+		return rc;
+	}
+
+	return maps[inst->secure ? CP_MAP : NS_MAP].domain;
+}
+
 static long venc_open(struct v4l2_subdev *sd, void *arg)
 {
 	struct venc_inst *inst = NULL;
 	struct venc_msg_ops *vmops = arg;
 	struct v4l2_event_subscription event = {0};
-	struct msm_vidc_iommu_info maps[MAX_MAP];
 	int rc = 0;
 
 	if (!vmops) {
@@ -305,15 +316,12 @@
 		goto vidc_subscribe_fail;
 	}
 
-	rc = msm_vidc_get_iommu_maps(inst->vidc_context, maps);
-	if (rc) {
-		WFD_MSG_ERR("Failed to retreive domain mappings\n");
-		rc = -ENODATA;
+	inst->domain = get_iommu_domain(inst);
+	if (inst->domain < 0) {
+		WFD_MSG_ERR("Failed to get domain\n");
 		goto vidc_subscribe_fail;
 	}
 
-	inst->domain = maps[inst->secure ? CP_MAP : NS_MAP].domain;
-
 	inst->callback_thread = kthread_run(venc_vidc_callback_thread, inst,
 					"venc_vidc_callback_thread");
 	if (IS_ERR(inst->callback_thread)) {
@@ -477,7 +485,8 @@
 	}
 
 	bufreq->count = v4l2_bufreq.count;
-	bufreq->size = v4l2_format.fmt.pix_mp.plane_fmt[0].sizeimage;
+	bufreq->size = ALIGN(v4l2_format.fmt.pix_mp.plane_fmt[0].sizeimage,
+			inst->secure ? SZ_1M : SZ_4K);
 
 	inst->free_input_indices.size_bits = bufreq->count;
 	inst->free_input_indices.size = roundup(bufreq->count,
@@ -549,12 +558,30 @@
 	return rc;
 }
 
+static void populate_planes(struct v4l2_plane *planes, int num_planes,
+		void *userptr, int size)
+{
+	int c = 0;
+
+	planes[0] = (struct v4l2_plane) {
+		.length = size,
+		.m.userptr = (int)userptr,
+	};
+
+	for (c = 1; c < num_planes - 1; ++c) {
+		planes[c] = (struct v4l2_plane) {
+			.length = 0,
+			.m.userptr = (int)NULL,
+		};
+	}
+}
+
 static long venc_set_input_buffer(struct v4l2_subdev *sd, void *arg)
 {
 	int rc = 0;
 	struct venc_inst *inst = NULL;
 	struct v4l2_buffer buf = {0};
-	struct v4l2_plane plane = {0};
+	struct v4l2_plane *planes = NULL;
 	struct mem_region *mregion = arg;
 
 	if (!sd) {
@@ -575,20 +602,21 @@
 	}
 
 	mregion = kzalloc(sizeof(*mregion), GFP_KERNEL);
-	*mregion = *(struct mem_region *)arg;
+	planes = kzalloc(sizeof(*planes) * inst->num_input_planes, GFP_KERNEL);
+	if (!mregion || !planes)
+		return -ENOMEM;
 
-	plane = (struct v4l2_plane) {
-		.length = mregion->size,
-		.m.userptr = (u32)mregion->paddr,
-	};
+	*mregion = *(struct mem_region *)arg;
+	populate_planes(planes, inst->num_input_planes,
+			mregion->paddr, mregion->size);
 
 	buf = (struct v4l2_buffer) {
 		.index = get_list_len(&inst->registered_input_bufs),
 		.type = BUF_TYPE_INPUT,
 		.bytesused = 0,
 		.memory = V4L2_MEMORY_USERPTR,
-		.m.planes = &plane,
-		.length = 1,
+		.m.planes = planes,
+		.length = inst->num_input_planes,
 	};
 
 	WFD_MSG_DBG("Prepare %p with index, %d",
@@ -600,9 +628,12 @@
 	}
 
 	list_add_tail(&mregion->list, &inst->registered_input_bufs.list);
+
+	kfree(planes);
 	return 0;
 set_input_buffer_fail:
 	kfree(mregion);
+	kfree(planes);
 	return rc;
 }
 
@@ -610,12 +641,19 @@
 		struct mem_region *mregion)
 {
 	int rc = 0;
-	unsigned long flags = 0, size = 0;
+	unsigned long size = 0, align_req = 0;
 	if (!mregion) {
 		rc = -EINVAL;
 		goto venc_map_fail;
 	}
 
+	align_req = inst->secure ? SZ_1M : SZ_4K;
+	if (mregion->size % align_req != 0) {
+		WFD_MSG_ERR("Memregion not aligned to %ld\n", align_req);
+		rc = -EINVAL;
+		goto venc_map_fail;
+	}
+
 	mregion->ion_handle = ion_import_dma_buf(venc_ion_client, mregion->fd);
 	if (IS_ERR_OR_NULL(mregion->ion_handle)) {
 		rc = PTR_ERR(mregion->ion_handle);
@@ -625,25 +663,31 @@
 		goto venc_map_fail;
 	}
 
-	rc = ion_handle_get_flags(venc_ion_client, mregion->ion_handle, &flags);
-	if (rc) {
-		WFD_MSG_ERR("Failed to get ion flags %d\n", rc);
-		goto venc_map_fail;
+	if (!inst->secure) {
+		mregion->kvaddr = ion_map_kernel(venc_ion_client,
+				mregion->ion_handle);
+		if (IS_ERR_OR_NULL(mregion->kvaddr)) {
+			WFD_MSG_ERR("Failed to map buffer into kernel\n");
+			rc = PTR_ERR(mregion->kvaddr);
+			mregion->kvaddr = NULL;
+			goto venc_map_fail;
+		}
+	} else {
+		mregion->kvaddr = NULL;
 	}
 
-	mregion->kvaddr = ion_map_kernel(venc_ion_client,
-				mregion->ion_handle);
-
-	if (IS_ERR_OR_NULL(mregion->kvaddr)) {
-		WFD_MSG_ERR("Failed to map buffer into kernel\n");
-		rc = PTR_ERR(mregion->kvaddr);
-		mregion->kvaddr = NULL;
-		goto venc_map_fail;
+	if (inst->secure) {
+		rc = msm_ion_secure_buffer(venc_ion_client,
+			mregion->ion_handle, VIDEO_BITSTREAM, 0);
+		if (rc) {
+			WFD_MSG_ERR("Failed to secure output buffer\n");
+			goto venc_map_iommu_map_fail;
+		}
 	}
 
 	rc = ion_map_iommu(venc_ion_client, mregion->ion_handle,
-			inst->domain, 0, SZ_4K, 0,
-			(unsigned long *)&mregion->paddr, &size, flags, 0);
+			inst->domain, 0, align_req, 0,
+			(unsigned long *)&mregion->paddr, &size, 0, 0);
 
 	if (rc) {
 		WFD_MSG_ERR("Failed to map into iommu\n");
@@ -657,8 +701,12 @@
 venc_map_iommu_size_fail:
 	ion_unmap_iommu(venc_ion_client, mregion->ion_handle,
 			inst->domain, 0);
+
+	if (inst->secure)
+		msm_ion_unsecure_buffer(venc_ion_client, mregion->ion_handle);
 venc_map_iommu_map_fail:
-	ion_unmap_kernel(venc_ion_client, mregion->ion_handle);
+	if (!inst->secure)
+		ion_unmap_kernel(venc_ion_client, mregion->ion_handle);
 venc_map_fail:
 	return rc;
 }
@@ -680,6 +728,8 @@
 		mregion->kvaddr = NULL;
 	}
 
+	if (inst->secure)
+		msm_ion_unsecure_buffer(venc_ion_client, mregion->ion_handle);
 
 	return 0;
 }
@@ -689,7 +739,7 @@
 	int rc = 0;
 	struct venc_inst *inst = NULL;
 	struct v4l2_buffer buf = {0};
-	struct v4l2_plane plane = {0};
+	struct v4l2_plane *planes = NULL;
 	struct mem_region *mregion = arg;
 
 	if (!sd) {
@@ -712,8 +762,9 @@
 	}
 
 	mregion = kzalloc(sizeof(*mregion), GFP_KERNEL);
+	planes = kzalloc(sizeof(*planes) * inst->num_output_planes, GFP_KERNEL);
 
-	if (!mregion) {
+	if (!mregion || !planes) {
 		WFD_MSG_ERR("Failed to allocate memory\n");
 		goto venc_set_output_buffer_fail;
 	}
@@ -727,18 +778,16 @@
 		goto venc_set_output_buffer_map_fail;
 	}
 
-	plane = (struct v4l2_plane) {
-		.length = mregion->size,
-		.m.userptr = (u32)mregion->paddr,
-	};
+	populate_planes(planes, inst->num_output_planes,
+			mregion->paddr, mregion->size);
 
 	buf = (struct v4l2_buffer) {
 		.index = get_list_len(&inst->registered_output_bufs),
 		.type = BUF_TYPE_OUTPUT,
 		.bytesused = 0,
 		.memory = V4L2_MEMORY_USERPTR,
-		.m.planes = &plane,
-		.length = 1,
+		.m.planes = planes,
+		.length = inst->num_output_planes,
 	};
 
 	WFD_MSG_DBG("Prepare %p with index, %d",
@@ -750,11 +799,14 @@
 	}
 
 	list_add_tail(&mregion->list, &inst->registered_output_bufs.list);
-	return rc;
+
+	kfree(planes);
+	return 0;
 venc_set_output_buffer_prepare_fail:
 	venc_unmap_user_to_kernel(inst, mregion);
 venc_set_output_buffer_map_fail:
 	kfree(mregion);
+	kfree(planes);
 venc_set_output_buffer_fail:
 	return rc;
 }
@@ -763,7 +815,7 @@
 {
 	struct venc_inst *inst = NULL;
 	struct v4l2_format *fmt = arg, temp;
-	int rc = 0;
+	int rc = 0, align_req = 0;
 
 	if (!sd) {
 		WFD_MSG_ERR("Subdevice required for %s\n", __func__);
@@ -799,7 +851,10 @@
 		rc = -EINVAL;
 		goto venc_set_format_fail;
 	}
-	fmt->fmt.pix.sizeimage = temp.fmt.pix_mp.plane_fmt[0].sizeimage;
+
+	align_req = inst->secure ? SZ_1M : SZ_4K;
+	fmt->fmt.pix.sizeimage = ALIGN(temp.fmt.pix_mp.plane_fmt[0].sizeimage,
+					align_req);
 	inst->num_output_planes = temp.fmt.pix_mp.num_planes;
 
 	temp.type = BUF_TYPE_INPUT;
@@ -971,7 +1026,6 @@
 		WFD_MSG_ERR("Trying to free a buffer of unknown type\n");
 		return -EINVAL;
 	}
-
 	mregion = get_registered_mregion(buf_list, to_free);
 
 	if (!mregion) {
@@ -1091,7 +1145,7 @@
 {
 	struct mem_region_map *mmap = arg;
 	struct mem_region *mregion = NULL;
-	unsigned long rc = 0, size = 0;
+	unsigned long rc = 0, size = 0, align_req = 0;
 	void *paddr = NULL;
 	struct venc_inst *inst = NULL;
 
@@ -1105,24 +1159,47 @@
 
 	inst = (struct venc_inst *)sd->dev_priv;
 	mregion = mmap->mregion;
-	if (mregion->size % SZ_4K != 0) {
-		WFD_MSG_ERR("Memregion not aligned to %d\n", SZ_4K);
-		return -EINVAL;
+
+	align_req = inst->secure ? SZ_1M : SZ_4K;
+	if (mregion->size % align_req != 0) {
+		WFD_MSG_ERR("Memregion not aligned to %ld\n", align_req);
+		rc = -EINVAL;
+		goto venc_map_bad_align;
+	}
+
+	if (inst->secure) {
+		rc = msm_ion_secure_buffer(mmap->ion_client,
+			mregion->ion_handle, VIDEO_PIXEL, 0);
+		if (rc) {
+			WFD_MSG_ERR("Failed to secure input buffer\n");
+			goto venc_map_bad_align;
+		}
 	}
 
 	rc = ion_map_iommu(mmap->ion_client, mregion->ion_handle,
-			inst->domain, 0, SZ_4K, 0, (unsigned long *)&paddr,
+			inst->domain, 0, align_req, 0, (unsigned long *)&paddr,
 			&size, 0, 0);
 
 	if (rc) {
-		WFD_MSG_ERR("Failed to get physical addr\n");
+		WFD_MSG_ERR("Failed to get physical addr %ld\n", rc);
 		paddr = NULL;
+		goto venc_map_bad_align;
 	} else if (size < mregion->size) {
 		WFD_MSG_ERR("Failed to map enough memory\n");
 		rc = -ENOMEM;
+		goto venc_map_iommu_size_fail;
 	}
 
 	mregion->paddr = paddr;
+	return 0;
+
+venc_map_iommu_size_fail:
+	ion_unmap_iommu(venc_ion_client, mregion->ion_handle,
+			inst->domain, 0);
+
+	if (inst->secure)
+		msm_ion_unsecure_buffer(mmap->ion_client, mregion->ion_handle);
+venc_map_bad_align:
 	return rc;
 }
 
@@ -1143,8 +1220,13 @@
 	inst = (struct venc_inst *)sd->dev_priv;
 	mregion = mmap->mregion;
 
-	ion_unmap_iommu(mmap->ion_client, mregion->ion_handle,
+	if (mregion->paddr)
+		ion_unmap_iommu(mmap->ion_client, mregion->ion_handle,
 			inst->domain, 0);
+
+	if (inst->secure)
+		msm_ion_unsecure_buffer(mmap->ion_client, mregion->ion_handle);
+
 	return 0;
 }
 
@@ -1157,6 +1239,55 @@
 	return 0;
 }
 
+static long secure_toggle(struct venc_inst *inst, bool secure)
+{
+	if (inst->secure == secure)
+		return 0;
+
+	if (!list_empty(&inst->registered_input_bufs.list) ||
+		!list_empty(&inst->registered_output_bufs.list)) {
+		WFD_MSG_ERR(
+			"Attempt to (un)secure encoder not allowed after registering buffers\n"
+			);
+		return -EEXIST;
+	}
+
+	inst->secure = secure;
+	inst->domain = get_iommu_domain(inst);
+	return 0;
+}
+
+static long venc_secure(struct v4l2_subdev *sd)
+{
+	struct venc_inst *inst = NULL;
+	struct v4l2_control ctrl;
+	int rc = 0;
+
+	if (!sd) {
+		WFD_MSG_ERR("Subdevice required for %s\n", __func__);
+		return -EINVAL;
+	}
+
+	inst = sd->dev_priv;
+	rc = secure_toggle(inst, true);
+	if (rc) {
+		WFD_MSG_ERR("Failed to toggle into secure mode\n");
+		goto secure_fail;
+	}
+
+	ctrl.id = V4L2_CID_MPEG_VIDC_VIDEO_SECURE;
+	rc = msm_vidc_s_ctrl(inst->vidc_context, &ctrl);
+	if (rc) {
+		WFD_MSG_ERR("Failed to move vidc into secure mode\n");
+		goto secure_fail;
+	}
+
+	return 0;
+secure_fail:
+	secure_toggle(sd->dev_priv, false);
+	return rc;
+}
+
 long venc_ioctl(struct v4l2_subdev *sd, unsigned int cmd, void *arg)
 {
 	long rc = 0;
@@ -1229,6 +1360,9 @@
 	case SET_FRAMERATE_MODE:
 		rc = venc_set_framerate_mode(sd, arg);
 		break;
+	case ENC_SECURE:
+		rc = venc_secure(sd);
+		break;
 	default:
 		WFD_MSG_ERR("Unknown ioctl %d to enc-subdev\n", cmd);
 		rc = -ENOTSUPP;
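
Note (illustrative, not part of the patch): throughout the encoder changes above, secure sessions switch the buffer alignment requirement from SZ_4K to SZ_1M. A minimal sketch of that rule, using only the standard ALIGN()/SZ_* kernel macros; the helper name is hypothetical:

	#include <linux/kernel.h>	/* ALIGN() */
	#include <linux/sizes.h>	/* SZ_4K, SZ_1M */
	#include <linux/types.h>	/* bool */

	/* Round a requested buffer size up to the alignment the encoder
	 * expects: 1 MB for secure sessions, 4 KB otherwise. Buffers that do
	 * not meet this alignment are rejected with -EINVAL in the patch.
	 */
	static unsigned long venc_example_align_size(unsigned long size, bool secure)
	{
		return ALIGN(size, secure ? SZ_1M : SZ_4K);
	}
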
diff --git a/drivers/media/platform/msm/wfd/mdp-5-subdev.c b/drivers/media/platform/msm/wfd/mdp-5-subdev.c
index 5b49498..4089a99 100644
--- a/drivers/media/platform/msm/wfd/mdp-5-subdev.c
+++ b/drivers/media/platform/msm/wfd/mdp-5-subdev.c
@@ -47,6 +47,10 @@
 		WFD_MSG_ERR("Invalid arguments\n");
 		rc = -EINVAL;
 		goto mdp_open_fail;
+	} else if (mops->secure) {
+		/* Deprecated API; use MDP_SECURE ioctl */
+		WFD_MSG_ERR("Deprecated API for securing subdevice\n");
+		return -ENOTSUPP;
 	}
 
 	fbi = msm_fb_get_writeback_fb();
@@ -120,6 +124,8 @@
 	struct fb_info *fbi = NULL;
 	if (inst) {
 		fbi = (struct fb_info *)inst->mdp;
+		if (inst->secure)
+			msm_fb_writeback_set_secure(inst->mdp, false);
 		msm_fb_writeback_terminate(fbi);
 		kfree(inst);
 		/* Unregister wfd node from switch driver */
@@ -193,10 +199,10 @@
 
 int mdp_mmap(struct v4l2_subdev *sd, void *arg)
 {
-	int rc = 0;
+	int rc = 0, align = 0;
 	struct mem_region_map *mmap = arg;
 	struct mem_region *mregion;
-	bool domain = -1;
+	int domain = -1;
 	struct mdp_instance *inst = NULL;
 
 	if (!mmap || !mmap->mregion || !mmap->cookie) {
@@ -206,17 +212,41 @@
 
 	inst = mmap->cookie;
 	mregion = mmap->mregion;
-	if (mregion->size % SZ_4K != 0) {
-		WFD_MSG_ERR("Memregion not aligned to %d\n", SZ_4K);
+	align = inst->secure ? SZ_1M : SZ_4K;
+	if (mregion->size % align != 0) {
+		WFD_MSG_ERR("Memregion not aligned to %d\n", align);
 		return -EINVAL;
 	}
 
-	domain = msm_fb_get_iommu_domain();
+	if (inst->secure) {
+		rc = msm_ion_secure_buffer(mmap->ion_client,
+			mregion->ion_handle, VIDEO_PIXEL, 0);
+		if (rc) {
+			WFD_MSG_ERR("Failed to secure input buffer\n");
+			goto secure_fail;
+		}
+	}
+
+	domain = msm_fb_get_iommu_domain(inst->mdp,
+			inst->secure ? MDP_IOMMU_DOMAIN_CP :
+					MDP_IOMMU_DOMAIN_NS);
+
 	rc = ion_map_iommu(mmap->ion_client, mregion->ion_handle,
-			domain, 0, SZ_4K, 0,
+			domain, 0, align, 0,
 			(unsigned long *)&mregion->paddr,
 			(unsigned long *)&mregion->size,
 			0, 0);
+	if (rc) {
+		WFD_MSG_ERR("Failed to map into %ssecure domain: %d\n",
+				!inst->secure ? "non" : "", rc);
+		goto iommu_fail;
+	}
+
+	return 0;
+iommu_fail:
+	if (inst->secure)
+		msm_ion_unsecure_buffer(mmap->ion_client, mregion->ion_handle);
+secure_fail:
 	return rc;
 }
 
@@ -224,7 +254,7 @@
 {
 	struct mem_region_map *mmap = arg;
 	struct mem_region *mregion;
-	bool domain = -1;
+	int domain = -1;
 	struct mdp_instance *inst = NULL;
 
 	if (!mmap || !mmap->mregion || !mmap->cookie) {
@@ -235,13 +265,37 @@
 	inst = mmap->cookie;
 	mregion = mmap->mregion;
 
-	domain = msm_fb_get_iommu_domain();
+	domain = msm_fb_get_iommu_domain(inst->mdp,
+			inst->secure ? MDP_IOMMU_DOMAIN_CP :
+					MDP_IOMMU_DOMAIN_NS);
 	ion_unmap_iommu(mmap->ion_client,
 			mregion->ion_handle,
 			domain, 0);
+
+	if (inst->secure)
+		msm_ion_unsecure_buffer(mmap->ion_client, mregion->ion_handle);
+
 	return 0;
 }
 
+int mdp_secure(struct v4l2_subdev *sd, void *arg)
+{
+	struct mdp_instance *inst = NULL;
+	int rc = 0;
+
+	if (!arg) {
+		WFD_MSG_ERR("Invalid argument\n");
+		return -EINVAL;
+	}
+
+	inst = arg;
+	rc = msm_fb_writeback_set_secure(inst->mdp, true);
+	if (!rc)
+		inst->secure = true;
+
+	return rc;
+}
+
 long mdp_ioctl(struct v4l2_subdev *sd, unsigned int cmd, void *arg)
 {
 	int rc = 0;
@@ -277,6 +331,9 @@
 	case MDP_MUNMAP:
 		rc = mdp_munmap(sd, arg);
 		break;
+	case MDP_SECURE:
+		rc = mdp_secure(sd, arg);
+		break;
 	default:
 		WFD_MSG_ERR("IOCTL: %u not supported\n", cmd);
 		rc = -EINVAL;
diff --git a/drivers/media/platform/msm/wfd/mdp-dummy-subdev.c b/drivers/media/platform/msm/wfd/mdp-dummy-subdev.c
index b2db208..2242c76 100644
--- a/drivers/media/platform/msm/wfd/mdp-dummy-subdev.c
+++ b/drivers/media/platform/msm/wfd/mdp-dummy-subdev.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
@@ -28,11 +28,12 @@
 	struct mutex mutex;
 };
 
-int mdp_init(struct v4l2_subdev *sd, u32 val)
+static int mdp_init(struct v4l2_subdev *sd, u32 val)
 {
 	return 0;
 }
-int mdp_open(struct v4l2_subdev *sd, void *arg)
+
+static int mdp_open(struct v4l2_subdev *sd, void *arg)
 {
 	struct mdp_instance *inst = kzalloc(sizeof(struct mdp_instance),
 					GFP_KERNEL);
@@ -50,49 +51,54 @@
 	return rc;
 }
 
-int mdp_start(struct v4l2_subdev *sd, void *arg)
+static int mdp_start(struct v4l2_subdev *sd, void *arg)
 {
 	return 0;
 }
-int mdp_stop(struct v4l2_subdev *sd, void *arg)
+
+static int mdp_stop(struct v4l2_subdev *sd, void *arg)
 {
 	return 0;
 }
-int mdp_close(struct v4l2_subdev *sd, void *arg)
+
+static int mdp_close(struct v4l2_subdev *sd, void *arg)
 {
 	return 0;
 }
-int mdp_q_buffer(struct v4l2_subdev *sd, void *arg)
+
+static int mdp_q_buffer(struct v4l2_subdev *sd, void *arg)
 {
 	static int foo;
 	int rc = 0;
 	struct mdp_buf_info *binfo = arg;
 	struct mdp_instance *inst = NULL;
+	struct mdp_buf_queue *new_entry = NULL;
 
 	if (!binfo || !binfo->inst || !binfo->cookie) {
 		WFD_MSG_ERR("Invalid argument\n");
 		return -EINVAL;
 	}
 
-
 	inst = binfo->inst;
-	if (binfo->kvaddr) {
-		struct mdp_buf_queue *new_entry = kzalloc(sizeof(*new_entry),
-				GFP_KERNEL);
-		memset((void *)binfo->kvaddr, foo++, 1024);
-		new_entry->mdp_buf_info = *binfo;
-		mutex_lock(&inst->mutex);
-		list_add_tail(&new_entry->node, &inst->mdp_bufs.node);
-		mutex_unlock(&inst->mutex);
-		WFD_MSG_DBG("Queue %p with cookie %p\n",
-			(void *)binfo->paddr, (void *)binfo->cookie);
-	} else {
-		rc = -EINVAL;
-	}
+	new_entry = kzalloc(sizeof(*new_entry), GFP_KERNEL);
+	if (!new_entry)
+		return -ENOMEM;
 
+	new_entry->mdp_buf_info = *binfo;
+	if (binfo->kvaddr)
+		memset((void *)binfo->kvaddr, foo++, 1024);
+
+
+	mutex_lock(&inst->mutex);
+	list_add_tail(&new_entry->node, &inst->mdp_bufs.node);
+	mutex_unlock(&inst->mutex);
+
+	WFD_MSG_DBG("Queue %p with cookie %p\n",
+			(void *)binfo->paddr, (void *)binfo->cookie);
 	return rc;
 }
-int mdp_dq_buffer(struct v4l2_subdev *sd, void *arg)
+
+static int mdp_dq_buffer(struct v4l2_subdev *sd, void *arg)
 {
 	struct mdp_buf_info *binfo = arg;
 	struct mdp_buf_queue *head = NULL;
@@ -121,12 +127,13 @@
 	return 0;
 
 }
-int mdp_set_prop(struct v4l2_subdev *sd, void *arg)
+
+static int mdp_set_prop(struct v4l2_subdev *sd, void *arg)
 {
 	return 0;
 }
 
-int mdp_mmap(struct v4l2_subdev *sd, void *arg)
+static int mdp_mmap(struct v4l2_subdev *sd, void *arg)
 {
 	int rc = 0;
 	struct mem_region_map *mmap = arg;
@@ -137,12 +144,17 @@
 	return rc;
 }
 
-int mdp_munmap(struct v4l2_subdev *sd, void *arg)
+static int mdp_munmap(struct v4l2_subdev *sd, void *arg)
 {
 	/* Whatever */
 	return 0;
 }
 
+static int mdp_secure(struct v4l2_subdev *sd)
+{
+	return 0;
+}
+
 long mdp_ioctl(struct v4l2_subdev *sd, unsigned int cmd, void *arg)
 {
 	int rc = 0;
@@ -178,6 +190,9 @@
 	case MDP_MUNMAP:
 		rc = mdp_munmap(sd, arg);
 		break;
+	case MDP_SECURE:
+		rc = mdp_secure(sd);
+		break;
 	default:
 		WFD_MSG_ERR("IOCTL: %u not supported\n", cmd);
 		rc = -EINVAL;
diff --git a/drivers/media/platform/msm/wfd/mdp-subdev.h b/drivers/media/platform/msm/wfd/mdp-subdev.h
index b04d448..f2c6fb1 100644
--- a/drivers/media/platform/msm/wfd/mdp-subdev.h
+++ b/drivers/media/platform/msm/wfd/mdp-subdev.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2011-2012, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2011-2013, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
@@ -59,6 +59,7 @@
 #define MDP_STOP  _IOR(MDP_MAGIC_IOCTL, 7, void *)
 #define MDP_MMAP  _IOR(MDP_MAGIC_IOCTL, 8, struct mem_region_map *)
 #define MDP_MUNMAP  _IOR(MDP_MAGIC_IOCTL, 9, struct mem_region_map *)
+#define MDP_SECURE  _IO(MDP_MAGIC_IOCTL, 9)
 
 
 extern int mdp_init(struct v4l2_subdev *sd, u32 val);
diff --git a/drivers/media/platform/msm/wfd/wfd-ioctl.c b/drivers/media/platform/msm/wfd/wfd-ioctl.c
index 3b732ae..9fb7c6d 100644
--- a/drivers/media/platform/msm/wfd/wfd-ioctl.c
+++ b/drivers/media/platform/msm/wfd/wfd-ioctl.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2011-2012, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2011-2013, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -52,7 +52,7 @@
 	struct v4l2_subdev enc_sdev;
 	struct v4l2_subdev vsg_sdev;
 	struct ion_client *ion_client;
-	bool secure_device;
+	bool secure;
 	bool in_use;
 	bool mdp_iommu_split_domain;
 };
@@ -154,16 +154,16 @@
 {
 	struct ion_handle *handle = NULL;
 	void *kvaddr = NULL;
-	unsigned int alloc_regions = 0;
-	unsigned int ion_flags = 0;
+	unsigned int alloc_regions = 0, ion_flags = 0, align = 0;
 	int rc = 0;
 
 	alloc_regions = ION_HEAP(ION_CP_MM_HEAP_ID);
 	alloc_regions |= secure ? 0 :
 				ION_HEAP(ION_IOMMU_HEAP_ID);
 	ion_flags |= secure ? ION_SECURE : 0;
-	handle = ion_alloc(client,
-			mregion->size, SZ_4K, alloc_regions, ion_flags);
+	align = secure ? SZ_1M : SZ_4K;
+	handle = ion_alloc(client, mregion->size, align,
+			alloc_regions, ion_flags);
 
 	if (IS_ERR_OR_NULL(handle)) {
 		WFD_MSG_ERR("Failed to allocate input buffer\n");
@@ -171,12 +171,16 @@
 		goto alloc_fail;
 	}
 
-	kvaddr = ion_map_kernel(client, handle);
+	if (!secure) {
+		kvaddr = ion_map_kernel(client, handle);
 
-	if (IS_ERR_OR_NULL(kvaddr)) {
-		WFD_MSG_ERR("Failed to get virtual addr\n");
-		rc = PTR_ERR(kvaddr);
-		goto alloc_fail;
+		if (IS_ERR_OR_NULL(kvaddr)) {
+			WFD_MSG_ERR("Failed to get virtual addr\n");
+			rc = PTR_ERR(kvaddr);
+			goto alloc_fail;
+		}
+	} else {
+		kvaddr = NULL;
 	}
 
 	mregion->kvaddr = kvaddr;
@@ -206,7 +210,8 @@
 				"Invalid client or region");
 		return -EINVAL;
 	}
-	ion_unmap_kernel(client, mregion->ion_handle);
+	if (mregion->kvaddr)
+		ion_unmap_kernel(client, mregion->ion_handle);
 	ion_free(client, mregion->ion_handle);
 	return 0;
 }
@@ -256,7 +261,7 @@
 		enc_mregion->size = ALIGN(inst->input_buf_size, SZ_4K);
 
 		rc = wfd_allocate_ion_buffer(wfd_dev->ion_client,
-				wfd_dev->secure_device, enc_mregion);
+				wfd_dev->secure, enc_mregion);
 		if (rc) {
 			WFD_MSG_ERR("Failed to allocate input memory\n");
 			goto alloc_fail;
@@ -391,6 +396,7 @@
 				&inst->input_mem_list) {
 			mpair = list_entry(ptr, struct mem_region_pair,
 						list);
+
 			rc = v4l2_subdev_call(&wfd_dev->enc_sdev,
 					core, ioctl, FREE_INPUT_BUFFER,
 					(void *)mpair->enc);
@@ -1004,8 +1010,31 @@
 {
 	int rc = 0;
 	struct wfd_device *wfd_dev = video_drvdata(filp);
-	rc = v4l2_subdev_call(&wfd_dev->enc_sdev, core,
-			ioctl, SET_PROP, a);
+	struct wfd_inst *inst = filp->private_data;
+
+	switch (a->id) {
+	case V4L2_CID_MPEG_VIDC_VIDEO_SECURE:
+		rc = v4l2_subdev_call(&wfd_dev->enc_sdev, core,
+				ioctl, ENC_SECURE, NULL);
+		if (rc) {
+			WFD_MSG_ERR("Couldn't secure encoder\n");
+			break;
+		}
+
+		rc = v4l2_subdev_call(&wfd_dev->mdp_sdev, core,
+				ioctl, MDP_SECURE, (void *)inst->mdp_inst);
+		if (rc) {
+			WFD_MSG_ERR("Couldn't secure MDP\n");
+			break;
+		}
+
+		wfd_dev->secure = true;
+		break;
+	default:
+		rc = v4l2_subdev_call(&wfd_dev->enc_sdev, core,
+				ioctl, SET_PROP, a);
+	}
+
 	if (rc)
 		WFD_MSG_ERR("Failed to set encoder property\n");
 	return rc;
@@ -1355,7 +1384,7 @@
 
 	wfd_stats_init(&inst->stats, MINOR(filp->f_dentry->d_inode->i_rdev));
 
-	mdp_mops.secure = wfd_dev->secure_device;
+	mdp_mops.secure = wfd_dev->secure;
 	mdp_mops.iommu_split_domain = wfd_dev->mdp_iommu_split_domain;
 	rc = v4l2_subdev_call(&wfd_dev->mdp_sdev, core, ioctl, MDP_OPEN,
 				(void *)&mdp_mops);
@@ -1373,7 +1402,7 @@
 	enc_mops.op_buffer_done = venc_op_buffer_done;
 	enc_mops.ip_buffer_done = venc_ip_buffer_done;
 	enc_mops.cbdata = filp;
-	enc_mops.secure = wfd_dev->secure_device;
+	enc_mops.secure = wfd_dev->secure;
 	rc = v4l2_subdev_call(&wfd_dev->enc_sdev, core, ioctl, OPEN,
 				(void *)&enc_mops);
 	if (rc || !enc_mops.cookie) {
@@ -1421,22 +1450,21 @@
 	inst = filp->private_data;
 	if (inst) {
 		wfdioc_streamoff(filp, NULL, V4L2_BUF_TYPE_VIDEO_CAPTURE);
+		vb2_queue_release(&inst->vid_bufq);
+		wfd_free_input_buffers(wfd_dev, inst);
+
 		rc = v4l2_subdev_call(&wfd_dev->mdp_sdev, core, ioctl,
 				MDP_CLOSE, (void *)inst->mdp_inst);
 		if (rc)
 			WFD_MSG_ERR("Failed to CLOSE mdp subdevice: %d\n", rc);
 
-		vb2_queue_release(&inst->vid_bufq);
-		wfd_free_input_buffers(wfd_dev, inst);
 		rc = v4l2_subdev_call(&wfd_dev->enc_sdev, core, ioctl,
 				CLOSE, (void *)inst->venc_inst);
-
 		if (rc)
 			WFD_MSG_ERR("Failed to CLOSE enc subdev: %d\n", rc);
 
 		rc = v4l2_subdev_call(&wfd_dev->vsg_sdev, core, ioctl,
 				VSG_CLOSE, NULL);
-
 		if (rc)
 			WFD_MSG_ERR("Failed to CLOSE vsg subdev: %d\n", rc);
 
@@ -1604,7 +1632,7 @@
 
 		switch (WFD_DEVICE_NUMBER_BASE + c) {
 		case WFD_DEVICE_SECURE:
-			wfd_dev[c].secure_device = true;
+			wfd_dev[c].secure = true;
 			break;
 		default:
 			break;
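
Note (illustrative, not part of the patch): with the V4L2_CID_MPEG_VIDC_VIDEO_SECURE handling added to wfdioc_s_ctrl(), user space can request a secure session before registering any buffers. A hedged user-space sketch follows; the device node path and the header exposing the control ID are assumptions, not taken from this patch:

	#include <fcntl.h>
	#include <unistd.h>
	#include <sys/ioctl.h>
	#include <linux/videodev2.h>
	#include <media/msm_vidc.h>	/* assumed location of V4L2_CID_MPEG_VIDC_VIDEO_SECURE */

	/* Open the WFD video node and switch the session to secure mode
	 * before any buffers are registered.
	 */
	static int wfd_request_secure(const char *devnode)
	{
		struct v4l2_control ctrl = {
			.id = V4L2_CID_MPEG_VIDC_VIDEO_SECURE,
			.value = 1,
		};
		int fd = open(devnode, O_RDWR);

		if (fd < 0)
			return -1;
		if (ioctl(fd, VIDIOC_S_CTRL, &ctrl) < 0) {
			close(fd);
			return -1;
		}
		return fd;	/* keep the fd for the rest of the session */
	}
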
diff --git a/drivers/media/radio/radio-iris.c b/drivers/media/radio/radio-iris.c
index afb40be..11a8f4d 100644
--- a/drivers/media/radio/radio-iris.c
+++ b/drivers/media/radio/radio-iris.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2011-2012, The Linux Foundation. All rights reserved
+/* Copyright (c) 2011-2013, The Linux Foundation. All rights reserved
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -3956,10 +3956,16 @@
 	return 0;
 }
 
+static const struct of_device_id iris_fm_match[] = {
+	{.compatible = "qcom,iris_fm"},
+	{}
+};
+
 static struct platform_driver iris_driver = {
 	.driver = {
 		.owner  = THIS_MODULE,
 		.name   = "iris_fm",
+		.of_match_table = iris_fm_match,
 	},
 	.remove = __devexit_p(iris_remove),
 };
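
Note (illustrative, not part of the patch): the new of_match_table lets the iris FM platform driver probe from devicetree. A minimal node sketch, assuming the probe path needs nothing beyond the compatible string:

	qcom,iris-fm {
		compatible = "qcom,iris_fm";
	};
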
diff --git a/drivers/misc/qseecom.c b/drivers/misc/qseecom.c
index 8aa4758..9e22ffb 100644
--- a/drivers/misc/qseecom.c
+++ b/drivers/misc/qseecom.c
@@ -663,6 +663,9 @@
 			&resp, sizeof(resp));
 		if (ret) {
 			pr_err("scm_call to load app failed\n");
+			if (!IS_ERR_OR_NULL(ihandle))
+				ion_free(qseecom.ion_clnt, ihandle);
+			qsee_disable_clock_vote(data, CLK_SFPB);
 			return -EINVAL;
 		}
 
@@ -1524,8 +1527,12 @@
 	app_ireq.qsee_cmd_id = QSEOS_APP_LOOKUP_COMMAND;
 	memcpy(app_ireq.app_name, app_name, MAX_APP_NAME_SIZE);
 	ret = __qseecom_check_app_exists(app_ireq);
-	if (ret < 0)
+	if (ret < 0) {
+		kzfree(data);
+		kfree(*handle);
+		*handle = NULL;
 		return -EINVAL;
+	}
 
 	if (ret > 0) {
 		pr_warn("App id %d for [%s] app exists\n", ret,
@@ -1554,6 +1561,7 @@
 
 		if (ret < 0) {
 			kfree(*handle);
+			kfree(data);
 			*handle = NULL;
 			return ret;
 		}
@@ -1563,6 +1571,9 @@
 		entry = kmalloc(sizeof(*entry), GFP_KERNEL);
 		if (!entry) {
 			pr_err("kmalloc failed\n");
+			kfree(data);
+			kfree(*handle);
+			*handle = NULL;
 			return -ENOMEM;
 		}
 		entry->app_id = ret;
diff --git a/drivers/misc/qseecom_kernel.h b/drivers/misc/qseecom_kernel.h
index 0c93ef2..c6c8fc9 100644
--- a/drivers/misc/qseecom_kernel.h
+++ b/drivers/misc/qseecom_kernel.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -14,6 +14,12 @@
 #define __QSEECOM_KERNEL_H_
 
 #include <linux/types.h>
+
+#define QSEECOM_ALIGN_SIZE	0x40
+#define QSEECOM_ALIGN_MASK	(QSEECOM_ALIGN_SIZE - 1)
+#define QSEECOM_ALIGN(x)	\
+	((x + QSEECOM_ALIGN_SIZE) & (~QSEECOM_ALIGN_MASK))
+
 /*
  * struct qseecom_handle -
  *      Handle to the qseecom device for kernel clients
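
Note (illustrative, not part of the patch): as written, QSEECOM_ALIGN() adds a full QSEECOM_ALIGN_SIZE before masking, so lengths that are already 0x40-aligned are still bumped to the next 0x40 boundary. A small stand-alone check of that behaviour:

	#include <assert.h>

	#define QSEECOM_ALIGN_SIZE	0x40
	#define QSEECOM_ALIGN_MASK	(QSEECOM_ALIGN_SIZE - 1)
	#define QSEECOM_ALIGN(x)	\
		((x + QSEECOM_ALIGN_SIZE) & (~QSEECOM_ALIGN_MASK))

	int main(void)
	{
		assert(QSEECOM_ALIGN(0x01) == 0x40);	/* rounds up */
		assert(QSEECOM_ALIGN(0x3F) == 0x40);
		assert(QSEECOM_ALIGN(0x40) == 0x80);	/* already aligned: bumped anyway */
		return 0;
	}
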
diff --git a/drivers/misc/smsc_hub.c b/drivers/misc/smsc_hub.c
index bc338a4..41d9ff8 100644
--- a/drivers/misc/smsc_hub.c
+++ b/drivers/misc/smsc_hub.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -471,7 +471,8 @@
 	}
 	pm_runtime_disable(&pdev->dev);
 
-	regulator_disable(smsc_hub->hub_vbus_reg);
+	if (!IS_ERR(smsc_hub->hub_vbus_reg))
+		regulator_disable(smsc_hub->hub_vbus_reg);
 	msm_hsic_hub_init_gpio(smsc_hub, 0);
 	msm_hsic_hub_init_clock(smsc_hub, 0);
 	msm_hsic_hub_init_vdd(smsc_hub, 0);
diff --git a/drivers/misc/tspp.c b/drivers/misc/tspp.c
index 9a53817..9598d45 100644
--- a/drivers/misc/tspp.c
+++ b/drivers/misc/tspp.c
@@ -763,6 +763,11 @@
 /*** Clock functions ***/
 static int tspp_clock_start(struct tspp_device *device)
 {
+	if (device == NULL) {
+		pr_err("tspp: Can't start clocks, invalid device\n");
+		return -EINVAL;
+	}
+
 	if (device->tsif_pclk && clk_prepare_enable(device->tsif_pclk) != 0) {
 		pr_err("tspp: Can't start pclk");
 		return -EBUSY;
@@ -780,11 +785,16 @@
 
 static void tspp_clock_stop(struct tspp_device *device)
 {
+	if (device == NULL) {
+		pr_err("tspp: Can't stop clocks, invalid device\n");
+		return;
+	}
+
 	if (device->tsif_pclk)
-		clk_disable(device->tsif_pclk);
+		clk_disable_unprepare(device->tsif_pclk);
 
 	if (device->tsif_ref_clk)
-		clk_disable(device->tsif_ref_clk);
+		clk_disable_unprepare(device->tsif_ref_clk);
 }
 
 /*** TSIF functions ***/
@@ -1458,7 +1468,10 @@
 
 	/* start the clocks if needed */
 	if (tspp_channels_in_use(pdev) == 0) {
-		tspp_clock_start(pdev);
+		rc = tspp_clock_start(pdev);
+		if (rc)
+			return rc;
+
 		wake_lock(&pdev->wake_lock);
 	}
 
@@ -1637,6 +1650,8 @@
 		tspp_clock_stop(pdev);
 	}
 
+	pm_runtime_put(&pdev->pdev->dev);
+
 	return 0;
 }
 EXPORT_SYMBOL(tspp_close_channel);
@@ -3021,6 +3036,7 @@
 {
 	struct tspp_channel *channel;
 	u32 i;
+	int rc;
 
 	struct tspp_device *device = platform_get_drvdata(pdev);
 
@@ -3033,9 +3049,11 @@
 	}
 
 	/* de-registering BAM device requires clocks */
-	tspp_clock_start(device);
-	sps_deregister_bam_device(device->bam_handle);
-	tspp_clock_stop(device);
+	rc = tspp_clock_start(device);
+	if (rc == 0) {
+		sps_deregister_bam_device(device->bam_handle);
+		tspp_clock_stop(device);
+	}
 
 	for (i = 0; i < TSPP_TSIF_INSTANCES; i++) {
 		tsif_debugfs_exit(&device->tsif[i]);
@@ -3058,7 +3076,7 @@
 		clk_put(device->tsif_pclk);
 
 	pm_runtime_disable(&pdev->dev);
-	pm_runtime_put(&pdev->dev);
+
 	kfree(device);
 
 	return 0;
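
Note (illustrative, not part of the patch): the tspp clock fixes above pair clk_prepare_enable() with clk_disable_unprepare(); calling plain clk_disable() against a clk_prepare_enable() leaves the prepare count elevated. A minimal sketch of the balanced pattern, with a placeholder function name:

	#include <linux/clk.h>

	/* Enable a clock for a burst of work and fully release it afterwards.
	 * clk_prepare_enable() must be undone by clk_disable_unprepare(), not
	 * by clk_disable() alone.
	 */
	static int example_run_with_clock(struct clk *pclk)
	{
		int rc = clk_prepare_enable(pclk);

		if (rc)
			return rc;

		/* ... touch the hardware here ... */

		clk_disable_unprepare(pclk);
		return 0;
	}
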
diff --git a/drivers/mmc/core/sdio_cis.c b/drivers/mmc/core/sdio_cis.c
index 2ca585d..b81af11 100644
--- a/drivers/mmc/core/sdio_cis.c
+++ b/drivers/mmc/core/sdio_cis.c
@@ -55,7 +55,7 @@
 
 	for (i = 0; i < nr_strings; i++) {
 		buffer[i] = string;
-		strlcpy(string, buf, sizeof(string));
+		strlcpy(string, buf, strlen(buf) + 1);
 		string += strlen(string) + 1;
 		buf += strlen(buf) + 1;
 	}
diff --git a/drivers/mmc/host/msm_sdcc.c b/drivers/mmc/host/msm_sdcc.c
index b386266..7669ea3 100644
--- a/drivers/mmc/host/msm_sdcc.c
+++ b/drivers/mmc/host/msm_sdcc.c
@@ -3366,20 +3366,7 @@
 			writel_relaxed(clk, host->base + MMCICLOCK);
 			msmsdcc_sync_reg_wr(host);
 
-			/*
-			 * Make sure that we don't double the clock if
-			 * doubled clock rate is already set
-			 */
-			if (!host->ddr_doubled_clk_rate ||
-				(host->ddr_doubled_clk_rate &&
-				(host->ddr_doubled_clk_rate != ios->clock))) {
-				host->ddr_doubled_clk_rate =
-					msmsdcc_get_sup_clk_rate(
-						host, (ios->clock * 2));
-				clock = host->ddr_doubled_clk_rate;
-			}
-		} else {
-			host->ddr_doubled_clk_rate = 0;
+			clock = msmsdcc_get_sup_clk_rate(host, ios->clock * 2);
 		}
 
 		if (clock != host->clk_rate) {
@@ -6116,7 +6103,6 @@
 	mmc->caps2 |= (MMC_CAP2_BOOTPART_NOACC | MMC_CAP2_DETECT_ON_ERR);
 	mmc->caps2 |= MMC_CAP2_SANITIZE;
 	mmc->caps2 |= MMC_CAP2_CACHE_CTRL;
-	mmc->caps2 |= MMC_CAP2_INIT_BKOPS;
 	mmc->caps2 |= MMC_CAP2_POWEROFF_NOTIFY;
 
 	if (plat->nonremovable)
diff --git a/drivers/mmc/host/msm_sdcc.h b/drivers/mmc/host/msm_sdcc.h
index b5522fb..4ed2d96 100644
--- a/drivers/mmc/host/msm_sdcc.h
+++ b/drivers/mmc/host/msm_sdcc.h
@@ -368,7 +368,6 @@
 
 	unsigned int		clk_rate;	/* Current clock rate */
 	unsigned int		pclk_rate;
-	unsigned int		ddr_doubled_clk_rate;
 
 	u32			pwr;
 	struct mmc_platform_data *plat;
diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
index 6451d62..8c2bea09 100644
--- a/drivers/mmc/host/sdhci.c
+++ b/drivers/mmc/host/sdhci.c
@@ -68,51 +68,51 @@
 
 static void sdhci_dumpregs(struct sdhci_host *host)
 {
-	printk(KERN_DEBUG DRIVER_NAME ": =========== REGISTER DUMP (%s)===========\n",
+	pr_debug(DRIVER_NAME ": =========== REGISTER DUMP (%s)===========\n",
 		mmc_hostname(host->mmc));
 
-	printk(KERN_DEBUG DRIVER_NAME ": Sys addr: 0x%08x | Version:  0x%08x\n",
+	pr_debug(DRIVER_NAME ": Sys addr: 0x%08x | Version:  0x%08x\n",
 		sdhci_readl(host, SDHCI_DMA_ADDRESS),
 		sdhci_readw(host, SDHCI_HOST_VERSION));
-	printk(KERN_DEBUG DRIVER_NAME ": Blk size: 0x%08x | Blk cnt:  0x%08x\n",
+	pr_debug(DRIVER_NAME ": Blk size: 0x%08x | Blk cnt:  0x%08x\n",
 		sdhci_readw(host, SDHCI_BLOCK_SIZE),
 		sdhci_readw(host, SDHCI_BLOCK_COUNT));
-	printk(KERN_DEBUG DRIVER_NAME ": Argument: 0x%08x | Trn mode: 0x%08x\n",
+	pr_debug(DRIVER_NAME ": Argument: 0x%08x | Trn mode: 0x%08x\n",
 		sdhci_readl(host, SDHCI_ARGUMENT),
 		sdhci_readw(host, SDHCI_TRANSFER_MODE));
-	printk(KERN_DEBUG DRIVER_NAME ": Present:  0x%08x | Host ctl: 0x%08x\n",
+	pr_debug(DRIVER_NAME ": Present:  0x%08x | Host ctl: 0x%08x\n",
 		sdhci_readl(host, SDHCI_PRESENT_STATE),
 		sdhci_readb(host, SDHCI_HOST_CONTROL));
-	printk(KERN_DEBUG DRIVER_NAME ": Power:    0x%08x | Blk gap:  0x%08x\n",
+	pr_debug(DRIVER_NAME ": Power:    0x%08x | Blk gap:  0x%08x\n",
 		sdhci_readb(host, SDHCI_POWER_CONTROL),
 		sdhci_readb(host, SDHCI_BLOCK_GAP_CONTROL));
-	printk(KERN_DEBUG DRIVER_NAME ": Wake-up:  0x%08x | Clock:    0x%08x\n",
+	pr_debug(DRIVER_NAME ": Wake-up:  0x%08x | Clock:    0x%08x\n",
 		sdhci_readb(host, SDHCI_WAKE_UP_CONTROL),
 		sdhci_readw(host, SDHCI_CLOCK_CONTROL));
-	printk(KERN_DEBUG DRIVER_NAME ": Timeout:  0x%08x | Int stat: 0x%08x\n",
+	pr_debug(DRIVER_NAME ": Timeout:  0x%08x | Int stat: 0x%08x\n",
 		sdhci_readb(host, SDHCI_TIMEOUT_CONTROL),
 		sdhci_readl(host, SDHCI_INT_STATUS));
-	printk(KERN_DEBUG DRIVER_NAME ": Int enab: 0x%08x | Sig enab: 0x%08x\n",
+	pr_debug(DRIVER_NAME ": Int enab: 0x%08x | Sig enab: 0x%08x\n",
 		sdhci_readl(host, SDHCI_INT_ENABLE),
 		sdhci_readl(host, SDHCI_SIGNAL_ENABLE));
-	printk(KERN_DEBUG DRIVER_NAME ": AC12 err: 0x%08x | Slot int: 0x%08x\n",
+	pr_debug(DRIVER_NAME ": AC12 err: 0x%08x | Slot int: 0x%08x\n",
 		sdhci_readw(host, SDHCI_ACMD12_ERR),
 		sdhci_readw(host, SDHCI_SLOT_INT_STATUS));
-	printk(KERN_DEBUG DRIVER_NAME ": Caps:     0x%08x | Caps_1:   0x%08x\n",
+	pr_debug(DRIVER_NAME ": Caps:     0x%08x | Caps_1:   0x%08x\n",
 		sdhci_readl(host, SDHCI_CAPABILITIES),
 		sdhci_readl(host, SDHCI_CAPABILITIES_1));
-	printk(KERN_DEBUG DRIVER_NAME ": Cmd:      0x%08x | Max curr: 0x%08x\n",
+	pr_debug(DRIVER_NAME ": Cmd:      0x%08x | Max curr: 0x%08x\n",
 		sdhci_readw(host, SDHCI_COMMAND),
 		sdhci_readl(host, SDHCI_MAX_CURRENT));
-	printk(KERN_DEBUG DRIVER_NAME ": Host ctl2: 0x%08x\n",
+	pr_debug(DRIVER_NAME ": Host ctl2: 0x%08x\n",
 		sdhci_readw(host, SDHCI_HOST_CONTROL2));
 
 	if (host->flags & SDHCI_USE_ADMA)
-		printk(KERN_DEBUG DRIVER_NAME ": ADMA Err: 0x%08x | ADMA Ptr: 0x%08x\n",
+		pr_debug(DRIVER_NAME ": ADMA Err: 0x%08x | ADMA Ptr: 0x%08x\n",
 		       readl(host->ioaddr + SDHCI_ADMA_ERROR),
 		       readl(host->ioaddr + SDHCI_ADMA_ADDRESS));
 
-	printk(KERN_DEBUG DRIVER_NAME ": ===========================================\n");
+	pr_debug(DRIVER_NAME ": ===========================================\n");
 }
 
 /*****************************************************************************\
@@ -144,14 +144,15 @@
 
 static void sdhci_set_card_detection(struct sdhci_host *host, bool enable)
 {
-	u32 irqs = SDHCI_INT_CARD_REMOVE | SDHCI_INT_CARD_INSERT;
+	u32 present, irqs;
 
 	if ((host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION) ||
 	    (host->mmc->caps & MMC_CAP_NONREMOVABLE))
 		return;
 
-	if (host->quirks2 & SDHCI_QUIRK2_OWN_CARD_DETECTION)
-		return;
+	present = sdhci_readl(host, SDHCI_PRESENT_STATE) &
+			      SDHCI_CARD_PRESENT;
+	irqs = present ? SDHCI_INT_CARD_REMOVE : SDHCI_INT_CARD_INSERT;
 
 	if (enable)
 		sdhci_unmask_irqs(host, irqs);
@@ -194,10 +195,14 @@
 	/* Wait max 100 ms */
 	timeout = 100;
 
+	if (host->ops->check_power_status && host->pwr &&
+	    (mask & SDHCI_RESET_ALL))
+		host->ops->check_power_status(host);
+
 	/* hw clears the bit when it's done */
 	while (sdhci_readb(host, SDHCI_SOFTWARE_RESET) & mask) {
 		if (timeout == 0) {
-			printk(KERN_ERR "%s: Reset 0x%x never completed.\n",
+			pr_err("%s: Reset 0x%x never completed.\n",
 				mmc_hostname(host->mmc), (int)mask);
 			sdhci_dumpregs(host);
 			return;
@@ -211,6 +216,11 @@
 
 	if (host->quirks & SDHCI_QUIRK_RESTORE_IRQS_AFTER_RESET)
 		sdhci_clear_set_irqs(host, SDHCI_INT_ALL_MASK, ier);
+
+	if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {
+		if ((host->ops->enable_dma) && (mask & SDHCI_RESET_ALL))
+			host->ops->enable_dma(host);
+	}
 }
 
 static void sdhci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios);
@@ -648,12 +658,11 @@
 	/* timeout in us */
 	if (!data)
 		target_timeout = cmd->cmd_timeout_ms * 1000;
-	else
-		target_timeout = data->timeout_ns / 1000 +
-			data->timeout_clks / host->clock;
-
-	if (host->quirks & SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK)
-		host->timeout_clk = host->clock / 1000;
+	else {
+		target_timeout = data->timeout_ns / 1000;
+		if (host->clock)
+			target_timeout += data->timeout_clks / host->clock;
+	}
 
 	/*
 	 * Figure out needed cycles.
@@ -665,7 +674,6 @@
 	 *     =>
 	 *     (1) / (2) > 2^6
 	 */
-	BUG_ON(!host->timeout_clk);
 	count = 0;
 	current_timeout = (1 << 13) * 1000 / host->timeout_clk;
 	while (current_timeout < target_timeout) {
@@ -675,8 +683,11 @@
 			break;
 	}
 
-	if (count >= 0xF)
+	if (count >= 0xF) {
+		DBG("%s: Too large timeout 0x%x requested for CMD%d!\n",
+		    mmc_hostname(host->mmc), count, cmd->opcode);
 		count = 0xE;
+	}
 
 	return count;
 }
@@ -884,8 +895,13 @@
 		}
 	}
 
-	if (data->flags & MMC_DATA_READ)
+	if (data->flags & MMC_DATA_READ) {
 		mode |= SDHCI_TRNS_READ;
+		if (host->ops->toggle_cdr)
+			host->ops->toggle_cdr(host, true);
+	}
+	if (host->ops->toggle_cdr && (data->flags & MMC_DATA_WRITE))
+		host->ops->toggle_cdr(host, false);
 	if (host->flags & SDHCI_REQ_USE_DMA)
 		mode |= SDHCI_TRNS_DMA;
 
@@ -968,7 +984,7 @@
 
 	while (sdhci_readl(host, SDHCI_PRESENT_STATE) & mask) {
 		if (timeout == 0) {
-			printk(KERN_ERR "%s: Controller never released "
+			pr_err("%s: Controller never released "
 				"inhibit bit(s).\n", mmc_hostname(host->mmc));
 			sdhci_dumpregs(host);
 			cmd->error = -EIO;
@@ -990,7 +1006,7 @@
 	sdhci_set_transfer_mode(host, cmd);
 
 	if ((cmd->flags & MMC_RSP_136) && (cmd->flags & MMC_RSP_BUSY)) {
-		printk(KERN_ERR "%s: Unsupported response type!\n",
+		pr_err("%s: Unsupported response type!\n",
 			mmc_hostname(host->mmc));
 		cmd->error = -EINVAL;
 		tasklet_schedule(&host->finish_tasklet);
@@ -1063,12 +1079,15 @@
 static void sdhci_set_clock(struct sdhci_host *host, unsigned int clock)
 {
 	int div = 0; /* Initialized for compiler warning */
+	int real_div = div, clk_mul = 1;
 	u16 clk = 0;
 	unsigned long timeout;
 
 	if (clock && clock == host->clock)
 		return;
 
+	host->mmc->actual_clock = 0;
+
 	if (host->ops->set_clock) {
 		host->ops->set_clock(host, clock);
 		if (host->quirks & SDHCI_QUIRK_NONSTANDARD_CLOCK)
@@ -1106,6 +1125,8 @@
 				 * Control register.
 				 */
 				clk = SDHCI_PROG_CLOCK_MODE;
+				real_div = div;
+				clk_mul = host->clk_mul;
 				div--;
 			}
 		} else {
@@ -1119,6 +1140,7 @@
 						break;
 				}
 			}
+			real_div = div;
 			div >>= 1;
 		}
 	} else {
@@ -1127,9 +1149,13 @@
 			if ((host->max_clk / div) <= clock)
 				break;
 		}
+		real_div = div;
 		div >>= 1;
 	}
 
+	if (real_div)
+		host->mmc->actual_clock = (host->max_clk * clk_mul) / real_div;
+
 	clk |= (div & SDHCI_DIV_MASK) << SDHCI_DIVIDER_SHIFT;
 	clk |= ((div & SDHCI_DIV_HI_MASK) >> SDHCI_DIV_MASK_LEN)
 		<< SDHCI_DIVIDER_HI_SHIFT;
@@ -1141,7 +1167,7 @@
 	while (!((clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL))
 		& SDHCI_CLOCK_INT_STABLE)) {
 		if (timeout == 0) {
-			printk(KERN_ERR "%s: Internal clock never "
+			pr_err("%s: Internal clock never "
 				"stabilised.\n", mmc_hostname(host->mmc));
 			sdhci_dumpregs(host);
 			return;
@@ -1157,7 +1183,7 @@
 	host->clock = clock;
 }
 
-static void sdhci_set_power(struct sdhci_host *host, unsigned short power)
+static int sdhci_set_power(struct sdhci_host *host, unsigned short power)
 {
 	u8 pwr = 0;
 
@@ -1180,32 +1206,42 @@
 	}
 
 	if (host->pwr == pwr)
-		return;
+		return -1;
 
 	host->pwr = pwr;
 
 	if (pwr == 0) {
 		sdhci_writeb(host, 0, SDHCI_POWER_CONTROL);
-		return;
+		if (host->ops->check_power_status)
+			host->ops->check_power_status(host);
+		return 0;
 	}
 
 	/*
 	 * Spec says that we should clear the power reg before setting
 	 * a new value. Some controllers don't seem to like this though.
 	 */
-	if (!(host->quirks & SDHCI_QUIRK_SINGLE_POWER_WRITE))
+	if (!(host->quirks & SDHCI_QUIRK_SINGLE_POWER_WRITE)) {
 		sdhci_writeb(host, 0, SDHCI_POWER_CONTROL);
+		if (host->ops->check_power_status)
+			host->ops->check_power_status(host);
+	}
 
 	/*
 	 * At least the Marvell CaFe chip gets confused if we set the voltage
 	 * and set turn on power at the same time, so set the voltage first.
 	 */
-	if (host->quirks & SDHCI_QUIRK_NO_SIMULT_VDD_AND_POWER)
+	if (host->quirks & SDHCI_QUIRK_NO_SIMULT_VDD_AND_POWER) {
 		sdhci_writeb(host, pwr, SDHCI_POWER_CONTROL);
+		if (host->ops->check_power_status)
+			host->ops->check_power_status(host);
+	}
 
 	pwr |= SDHCI_POWER_ON;
 
 	sdhci_writeb(host, pwr, SDHCI_POWER_CONTROL);
+	if (host->ops->check_power_status)
+		host->ops->check_power_status(host);
 
 	/*
 	 * Some controllers need an extra 10ms delay of 10ms before they
@@ -1213,6 +1249,8 @@
 	 */
 	if (host->quirks & SDHCI_QUIRK_DELAY_AFTER_POWER)
 		mdelay(10);
+
+	return power;
 }
 
 /*****************************************************************************\
@@ -1294,12 +1332,14 @@
 static void sdhci_do_set_ios(struct sdhci_host *host, struct mmc_ios *ios)
 {
 	unsigned long flags;
+	int vdd_bit = -1;
 	u8 ctrl;
 
-	spin_lock_irqsave(&host->lock, flags);
-
-	if (host->flags & SDHCI_DEVICE_DEAD)
-		goto out;
+	if (host->flags & SDHCI_DEVICE_DEAD) {
+		if (host->vmmc && ios->power_mode == MMC_POWER_OFF)
+			mmc_regulator_set_ocr(host->mmc, host->vmmc, 0);
+		return;
+	}
 
 	/*
 	 * Reset the chip on each power off.
@@ -1313,9 +1353,14 @@
 	sdhci_set_clock(host, ios->clock);
 
 	if (ios->power_mode == MMC_POWER_OFF)
-		sdhci_set_power(host, -1);
+		vdd_bit = sdhci_set_power(host, -1);
 	else
-		sdhci_set_power(host, ios->vdd);
+		vdd_bit = sdhci_set_power(host, ios->vdd);
+
+	if (host->vmmc && vdd_bit != -1)
+		mmc_regulator_set_ocr(host->mmc, host->vmmc, vdd_bit);
+
+	spin_lock_irqsave(&host->lock, flags);
 
 	if (host->ops->platform_send_init_74_clocks)
 		host->ops->platform_send_init_74_clocks(host, ios->power_mode);
@@ -1442,7 +1487,6 @@
 	if(host->quirks & SDHCI_QUIRK_RESET_CMD_DATA_ON_IOS)
 		sdhci_reset(host, SDHCI_RESET_CMD | SDHCI_RESET_DATA);
 
-out:
 	mmiowb();
 	spin_unlock_irqrestore(&host->lock, flags);
 }
@@ -1572,6 +1616,8 @@
 		/* Set 1.8V Signal Enable in the Host Control2 register to 0 */
 		ctrl &= ~SDHCI_CTRL_VDD_180;
 		sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);
+		if (host->ops->check_power_status)
+			host->ops->check_power_status(host);
 
 		/* Wait for 5ms */
 		usleep_range(5000, 5500);
@@ -1581,7 +1627,7 @@
 		if (!(ctrl & SDHCI_CTRL_VDD_180))
 			return 0;
 		else {
-			printk(KERN_INFO DRIVER_NAME ": Switching to 3.3V "
+			pr_info(DRIVER_NAME ": Switching to 3.3V "
 				"signalling voltage failed\n");
 			return -EIO;
 		}
@@ -1602,6 +1648,8 @@
 			 */
 			ctrl |= SDHCI_CTRL_VDD_180;
 			sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);
+			if (host->ops->check_power_status)
+				host->ops->check_power_status(host);
 
 			/* Wait for 5ms */
 			usleep_range(5000, 5500);
@@ -1634,13 +1682,17 @@
 		pwr = sdhci_readb(host, SDHCI_POWER_CONTROL);
 		pwr &= ~SDHCI_POWER_ON;
 		sdhci_writeb(host, pwr, SDHCI_POWER_CONTROL);
+		if (host->ops->check_power_status)
+			host->ops->check_power_status(host);
 
 		/* Wait for 1ms as per the spec */
 		usleep_range(1000, 1500);
 		pwr |= SDHCI_POWER_ON;
 		sdhci_writeb(host, pwr, SDHCI_POWER_CONTROL);
+		if (host->ops->check_power_status)
+			host->ops->check_power_status(host);
 
-		printk(KERN_INFO DRIVER_NAME ": Switching to 1.8V signalling "
+		pr_info(DRIVER_NAME ": Switching to 1.8V signalling "
 			"voltage failed, retrying with S18R set to 0\n");
 		return -EAGAIN;
 	} else
@@ -1666,7 +1718,7 @@
 {
 	struct sdhci_host *host;
 	u16 ctrl;
-	u32 ier;
+	u32 ier = 0;
 	int tuning_loop_counter = MAX_TUNING_LOOP;
 	unsigned long timeout;
 	int err = 0;
@@ -1687,9 +1739,9 @@
 	 * If the Host Controller supports the HS200 mode then the
 	 * tuning function has to be executed.
 	 */
-	if (((ctrl & SDHCI_CTRL_UHS_MASK) == SDHCI_CTRL_UHS_SDR50) &&
-	    (host->flags & SDHCI_SDR50_NEEDS_TUNING ||
-	     host->flags & SDHCI_HS200_NEEDS_TUNING))
+	if ((((ctrl & SDHCI_CTRL_UHS_MASK) == SDHCI_CTRL_UHS_SDR50) &&
+	    (host->flags & SDHCI_SDR50_NEEDS_TUNING)) ||
+	     (host->flags & SDHCI_HS200_NEEDS_TUNING))
 		requires_tuning_nonuhs = true;
 
 	if (((ctrl & SDHCI_CTRL_UHS_MASK) == SDHCI_CTRL_UHS_SDR104) ||
@@ -1702,6 +1754,14 @@
 		return 0;
 	}
 
+	if (host->ops->execute_tuning) {
+		spin_unlock(&host->lock);
+		enable_irq(host->irq);
+		host->ops->execute_tuning(host, opcode);
+		disable_irq(host->irq);
+		spin_lock(&host->lock);
+		goto out;
+	}
 	sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);
 
 	/*
@@ -1780,7 +1840,7 @@
 		spin_lock(&host->lock);
 
 		if (!host->tuning_done) {
-			printk(KERN_INFO DRIVER_NAME ": Timeout waiting for "
+			pr_info(DRIVER_NAME ": Timeout waiting for "
 				"Buffer Read Ready interrupt during tuning "
 				"procedure, falling back to fixed sampling "
 				"clock\n");
@@ -1810,7 +1870,7 @@
 		sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);
 	} else {
 		if (!(ctrl & SDHCI_CTRL_TUNED_CLK)) {
-			printk(KERN_INFO DRIVER_NAME ": Tuning procedure"
+			pr_info(DRIVER_NAME ": Tuning procedure"
 				" failed, falling back to fixed sampling"
 				" clock\n");
 			err = -EIO;
@@ -1925,9 +1985,9 @@
 	/* Check host->mrq first in case we are runtime suspended */
 	if (host->mrq &&
 	    !(sdhci_readl(host, SDHCI_PRESENT_STATE) & SDHCI_CARD_PRESENT)) {
-		printk(KERN_ERR "%s: Card removed during transfer!\n",
+		pr_err("%s: Card removed during transfer!\n",
 			mmc_hostname(host->mmc));
-		printk(KERN_ERR "%s: Resetting controller.\n",
+		pr_err("%s: Resetting controller.\n",
 			mmc_hostname(host->mmc));
 
 		sdhci_reset(host, SDHCI_RESET_CMD);
@@ -2016,7 +2076,7 @@
 	spin_lock_irqsave(&host->lock, flags);
 
 	if (host->mrq) {
-		printk(KERN_ERR "%s: Timeout waiting for hardware "
+		pr_err("%s: Timeout waiting for hardware "
 			"interrupt.\n", mmc_hostname(host->mmc));
 		sdhci_dumpregs(host);
 
@@ -2062,7 +2122,7 @@
 	BUG_ON(intmask == 0);
 
 	if (!host->cmd) {
-		printk(KERN_ERR "%s: Got command interrupt 0x%08x even "
+		pr_err("%s: Got command interrupt 0x%08x even "
 			"though no command operation was in progress.\n",
 			mmc_hostname(host->mmc), (unsigned)intmask);
 		sdhci_dumpregs(host);
@@ -2164,7 +2224,7 @@
 			}
 		}
 
-		printk(KERN_ERR "%s: Got data interrupt 0x%08x even "
+		pr_err("%s: Got data interrupt 0x%08x even "
 			"though no data operation was in progress.\n",
 			mmc_hostname(host->mmc), (unsigned)intmask);
 		sdhci_dumpregs(host);
@@ -2181,7 +2241,7 @@
 			!= MMC_BUS_TEST_R)
 		host->data->error = -EILSEQ;
 	else if (intmask & SDHCI_INT_ADMA_ERROR) {
-		printk(KERN_ERR "%s: ADMA error\n", mmc_hostname(host->mmc));
+		pr_err("%s: ADMA error\n", mmc_hostname(host->mmc));
 		sdhci_show_adma_error(host);
 		host->data->error = -EIO;
 	}
@@ -2245,7 +2305,7 @@
 
 	if (host->runtime_suspended) {
 		spin_unlock(&host->lock);
-		printk(KERN_WARNING "%s: got irq while runtime suspended\n",
+		pr_warning("%s: got irq while runtime suspended\n",
 		       mmc_hostname(host->mmc));
 		return IRQ_HANDLED;
 	}
@@ -2262,13 +2322,30 @@
 		mmc_hostname(host->mmc), intmask);
 
 	if (intmask & (SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE)) {
+		u32 present = sdhci_readl(host, SDHCI_PRESENT_STATE) &
+			      SDHCI_CARD_PRESENT;
+
+		/*
+		 * There is an observation on i.mx esdhc: the INSERT bit will
+		 * be set again immediately after it gets cleared, if a card
+		 * is inserted.  We have to mask the irq to prevent an
+		 * interrupt storm which will freeze the system.  The REMOVE
+		 * bit gets into the same situation.
+		 *
+		 * More testing is needed here to ensure it works for other
+		 * platforms though.
+		 */
+		sdhci_mask_irqs(host, present ? SDHCI_INT_CARD_INSERT :
+						SDHCI_INT_CARD_REMOVE);
+		sdhci_unmask_irqs(host, present ? SDHCI_INT_CARD_REMOVE :
+						  SDHCI_INT_CARD_INSERT);
+
 		sdhci_writel(host, intmask & (SDHCI_INT_CARD_INSERT |
-			SDHCI_INT_CARD_REMOVE), SDHCI_INT_STATUS);
+			     SDHCI_INT_CARD_REMOVE), SDHCI_INT_STATUS);
+		intmask &= ~(SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE);
 		tasklet_schedule(&host->card_tasklet);
 	}
 
-	intmask &= ~(SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE);
-
 	if (intmask & SDHCI_INT_CMD_MASK) {
 		sdhci_writel(host, intmask & SDHCI_INT_CMD_MASK,
 			SDHCI_INT_STATUS);
@@ -2286,7 +2363,7 @@
 	intmask &= ~SDHCI_INT_ERROR;
 
 	if (intmask & SDHCI_INT_BUS_POWER) {
-		printk(KERN_ERR "%s: Card is consuming too much power!\n",
+		pr_err("%s: Card is consuming too much power!\n",
 			mmc_hostname(host->mmc));
 		sdhci_writel(host, SDHCI_INT_BUS_POWER, SDHCI_INT_STATUS);
 	}
@@ -2299,9 +2376,6 @@
 	intmask &= ~SDHCI_INT_CARD_INT;
 
 	if (intmask) {
-		printk(KERN_ERR "%s: Unexpected interrupt 0x%08x.\n",
-			mmc_hostname(host->mmc), intmask);
-		sdhci_dumpregs(host);
 		unexpected |= intmask;
 		sdhci_writel(host, intmask, SDHCI_INT_STATUS);
 	}
@@ -2339,6 +2413,7 @@
 int sdhci_suspend_host(struct sdhci_host *host)
 {
 	int ret;
+	bool has_tuning_timer;
 
 	if (host->ops->platform_suspend)
 		host->ops->platform_suspend(host);
@@ -2346,21 +2421,28 @@
 	sdhci_disable_card_detection(host);
 
 	/* Disable tuning since we are suspending */
-	if (host->version >= SDHCI_SPEC_300 && host->tuning_count &&
-	    host->tuning_mode == SDHCI_TUNING_MODE_1) {
+	has_tuning_timer = host->version >= SDHCI_SPEC_300 &&
+		host->tuning_count && host->tuning_mode == SDHCI_TUNING_MODE_1;
+	if (has_tuning_timer) {
 		del_timer_sync(&host->tuning_timer);
 		host->flags &= ~SDHCI_NEEDS_RETUNING;
 	}
 
 	ret = mmc_suspend_host(host->mmc);
-	if (ret)
+	if (ret) {
+		if (has_tuning_timer) {
+			host->flags |= SDHCI_NEEDS_RETUNING;
+			mod_timer(&host->tuning_timer, jiffies +
+					host->tuning_count * HZ);
+		}
+
+		sdhci_enable_card_detection(host);
+
 		return ret;
+	}
 
 	free_irq(host->irq, host);
 
-	if (host->vmmc)
-		ret = regulator_disable(host->vmmc);
-
 	return ret;
 }
 
@@ -2370,12 +2452,6 @@
 {
 	int ret;
 
-	if (host->vmmc) {
-		int ret = regulator_enable(host->vmmc);
-		if (ret)
-			return ret;
-	}
-
 	if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {
 		if (host->ops->enable_dma)
 			host->ops->enable_dma(host);
@@ -2561,7 +2637,7 @@
 	host->version = (host->version & SDHCI_SPEC_VER_MASK)
 				>> SDHCI_SPEC_VER_SHIFT;
 	if (host->version > SDHCI_SPEC_300) {
-		printk(KERN_ERR "%s: Unknown controller version (%d). "
+		pr_err("%s: Unknown controller version (%d). "
 			"You may experience problems.\n", mmc_hostname(mmc),
 			host->version);
 	}
@@ -2598,7 +2674,7 @@
 	if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {
 		if (host->ops->enable_dma) {
 			if (host->ops->enable_dma(host)) {
-				printk(KERN_WARNING "%s: No suitable DMA "
+				pr_warning("%s: No suitable DMA "
 					"available. Falling back to PIO.\n",
 					mmc_hostname(mmc));
 				host->flags &=
@@ -2618,7 +2694,7 @@
 		if (!host->adma_desc || !host->align_buffer) {
 			kfree(host->adma_desc);
 			kfree(host->align_buffer);
-			printk(KERN_WARNING "%s: Unable to allocate ADMA "
+			pr_warning("%s: Unable to allocate ADMA "
 				"buffers. Falling back to standard DMA.\n",
 				mmc_hostname(mmc));
 			host->flags &= ~SDHCI_USE_ADMA;
@@ -2646,46 +2722,13 @@
 	if (host->max_clk == 0 || host->quirks &
 			SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN) {
 		if (!host->ops->get_max_clock) {
-			printk(KERN_ERR
-			       "%s: Hardware doesn't specify base clock "
+			pr_err("%s: Hardware doesn't specify base clock "
 			       "frequency.\n", mmc_hostname(mmc));
 			return -ENODEV;
 		}
 		host->max_clk = host->ops->get_max_clock(host);
 	}
 
-	host->timeout_clk =
-		(caps[0] & SDHCI_TIMEOUT_CLK_MASK) >> SDHCI_TIMEOUT_CLK_SHIFT;
-	if (host->timeout_clk == 0) {
-		if (host->ops->get_timeout_clock) {
-			host->timeout_clk = host->ops->get_timeout_clock(host);
-		} else if (!(host->quirks &
-				SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK)) {
-			printk(KERN_ERR
-			       "%s: Hardware doesn't specify timeout clock "
-			       "frequency.\n", mmc_hostname(mmc));
-			return -ENODEV;
-		}
-	}
-	if (caps[0] & SDHCI_TIMEOUT_CLK_UNIT)
-		host->timeout_clk *= 1000;
-
- 	/*
-	 * In case of Host Controller v3.00, find out whether clock
-	 * multiplier is supported.
-	 */
-	host->clk_mul = (caps[1] & SDHCI_CLOCK_MUL_MASK) >>
-		SDHCI_CLOCK_MUL_SHIFT;
-
-	/*
-	 * In case the value in Clock Multiplier is 0, then programmable
-	 * clock mode is not supported, otherwise the actual clock
-	 * multiplier is one more than the value of Clock Multiplier
-	 * in the Capabilities Register.
-	 */
-	if (host->clk_mul)
-		host->clk_mul += 1;
-
 	/*
 	 * In case of Host Controller v3.00, find out whether clock
 	 * multiplier is supported.
@@ -2718,6 +2761,26 @@
 	} else
 		mmc->f_min = host->max_clk / SDHCI_MAX_DIV_SPEC_200;
 
+	host->timeout_clk =
+		(caps[0] & SDHCI_TIMEOUT_CLK_MASK) >> SDHCI_TIMEOUT_CLK_SHIFT;
+	if (host->timeout_clk == 0) {
+		if (host->ops->get_timeout_clock) {
+			host->timeout_clk = host->ops->get_timeout_clock(host);
+		} else if (!(host->quirks &
+				SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK)) {
+			pr_err("%s: Hardware doesn't specify timeout clock "
+			       "frequency.\n", mmc_hostname(mmc));
+			return -ENODEV;
+		}
+	}
+	if (caps[0] & SDHCI_TIMEOUT_CLK_UNIT)
+		host->timeout_clk *= 1000;
+
+	if (host->quirks & SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK)
+		host->timeout_clk = mmc->f_max / 1000;
+
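+	/*
+	 * Worked example (illustrative): timeout_clk ends up in kHz above,
+	 * and the SDHCI data timeout counter is capped at 2^27 clocks, so a
+	 * 50 MHz timeout clock (timeout_clk = 50000) gives
+	 * max_discard_to = 134217728 / 50000 ~= 2684 ms.
+	 */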
+	mmc->max_discard_to = (1 << 27) / host->timeout_clk;
+
 	mmc->caps |= MMC_CAP_SDIO_IRQ | MMC_CAP_ERASE | MMC_CAP_CMD23;
 
 	if (host->quirks & SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12)
@@ -2869,7 +2932,7 @@
 		mmc->ocr_avail_mmc &= host->ocr_avail_mmc;
 
 	if (mmc->ocr_avail == 0) {
-		printk(KERN_ERR "%s: Hardware doesn't report any "
+		pr_err("%s: Hardware doesn't report any "
 			"support voltages.\n", mmc_hostname(mmc));
 		return -ENODEV;
 	}
@@ -2917,7 +2980,7 @@
 		mmc->max_blk_size = (caps[0] & SDHCI_MAX_BLOCK_MASK) >>
 				SDHCI_MAX_BLOCK_SHIFT;
 		if (mmc->max_blk_size >= 3) {
-			printk(KERN_WARNING "%s: Invalid maximum block size, "
+			pr_warning("%s: Invalid maximum block size, "
 				"assuming 512 bytes\n", mmc_hostname(mmc));
 			mmc->max_blk_size = 0;
 		}
@@ -2956,10 +3019,8 @@
 
 	host->vmmc = regulator_get(mmc_dev(mmc), "vmmc");
 	if (IS_ERR(host->vmmc)) {
-		printk(KERN_INFO "%s: no vmmc regulator found\n", mmc_hostname(mmc));
+		pr_info("%s: no vmmc regulator found\n", mmc_hostname(mmc));
 		host->vmmc = NULL;
-	} else {
-		regulator_enable(host->vmmc);
 	}
 
 	sdhci_init(host, 0);
@@ -2985,7 +3046,7 @@
 
 	mmc_add_host(mmc);
 
-	printk(KERN_INFO "%s: SDHCI controller on %s [%s] using %s\n",
+	pr_info("%s: SDHCI controller on %s [%s] using %s\n",
 		mmc_hostname(mmc), host->hw_name, dev_name(mmc_dev(mmc)),
 		(host->flags & SDHCI_USE_ADMA) ? "ADMA" :
 		(host->flags & SDHCI_USE_SDMA) ? "DMA" : "PIO");
@@ -3018,7 +3079,7 @@
 		host->flags |= SDHCI_DEVICE_DEAD;
 
 		if (host->mrq) {
-			printk(KERN_ERR "%s: Controller removed during "
+			pr_err("%s: Controller removed during "
 				" transfer!\n", mmc_hostname(host->mmc));
 
 			host->mrq->cmd->error = -ENOMEDIUM;
@@ -3048,10 +3109,8 @@
 	tasklet_kill(&host->card_tasklet);
 	tasklet_kill(&host->finish_tasklet);
 
-	if (host->vmmc) {
-		regulator_disable(host->vmmc);
+	if (host->vmmc)
 		regulator_put(host->vmmc);
-	}
 
 	kfree(host->adma_desc);
 	kfree(host->align_buffer);
@@ -3077,9 +3136,9 @@
 
 static int __init sdhci_drv_init(void)
 {
-	printk(KERN_INFO DRIVER_NAME
+	pr_info(DRIVER_NAME
 		": Secure Digital Host Controller Interface driver\n");
-	printk(KERN_INFO DRIVER_NAME ": Copyright(c) Pierre Ossman\n");
+	pr_info(DRIVER_NAME ": Copyright(c) Pierre Ossman\n");
 
 	return 0;
 }
diff --git a/drivers/mmc/host/sdhci.h b/drivers/mmc/host/sdhci.h
index f761f23..4f8d01d 100644
--- a/drivers/mmc/host/sdhci.h
+++ b/drivers/mmc/host/sdhci.h
@@ -277,6 +277,9 @@
 	void	(*hw_reset)(struct sdhci_host *host);
 	void	(*platform_suspend)(struct sdhci_host *host);
 	void	(*platform_resume)(struct sdhci_host *host);
+	void	(*check_power_status)(struct sdhci_host *host);
+	int	(*execute_tuning)(struct sdhci_host *host, u32 opcode);
+	void	(*toggle_cdr)(struct sdhci_host *host, bool enable);
 };
 
 #ifdef CONFIG_MMC_SDHCI_IO_ACCESSORS
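The three callbacks added above are optional platform hooks: check_power_status() is invoked after the core writes the power-control or signalling-voltage registers so the controller glue can acknowledge its power IRQ, execute_tuning() lets the platform take over the whole tuning sequence (the core skips its generic loop when it is set), and toggle_cdr() presumably switches the clock-data-recovery block around tuning commands. A minimal sketch of how a platform driver might wire them up; the my_* names are illustrative and not part of this patch:

	static void my_check_power_status(struct sdhci_host *host)
	{
		/* Acknowledge or poll the platform power IRQ here. */
	}

	static int my_execute_tuning(struct sdhci_host *host, u32 opcode)
	{
		/* Run the platform-specific tuning sequence for opcode. */
		return 0;
	}

	static void my_toggle_cdr(struct sdhci_host *host, bool enable)
	{
		/* Enable or disable clock-data-recovery as requested. */
	}

	static struct sdhci_ops my_sdhci_ops = {
		.check_power_status	= my_check_power_status,
		.execute_tuning		= my_execute_tuning,
		.toggle_cdr		= my_toggle_cdr,
	};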
diff --git a/drivers/net/ethernet/msm/Kconfig b/drivers/net/ethernet/msm/Kconfig
index 3fced2d..4e95614 100644
--- a/drivers/net/ethernet/msm/Kconfig
+++ b/drivers/net/ethernet/msm/Kconfig
@@ -42,6 +42,16 @@
 	help
 	  Debug stats on wakeup counts.
 
+config MSM_RMNET_WWAN
+	tristate "MSM RMNET WWAN Network Device"
+	depends on IPA
+	default n
+	help
+	  WWAN Network Driver
+	  Provides an API to embedded
+	  applications to send and receive
+	  the data to/from A2
+
 config QFEC
 	tristate "QFEC ethernet driver"
 	select MII
@@ -50,3 +60,9 @@
 	  This driver supports Ethernet in the FSM9xxx.
 	  To compile this driver as a module, choose M here: the
 	  module will be called qfec.
+
+config ECM_IPA
+	tristate "STD ECM LAN Driver support"
+	depends on IPA
+	help
+	  Allows LAN between Apps and tethered HOST on STD ECM
diff --git a/drivers/net/ethernet/msm/Makefile b/drivers/net/ethernet/msm/Makefile
index 7d9d4c6..0afa00f 100644
--- a/drivers/net/ethernet/msm/Makefile
+++ b/drivers/net/ethernet/msm/Makefile
@@ -3,7 +3,9 @@
 #
 
 obj-$(CONFIG_MSM_RMNET) += msm_rmnet.o
+obj-$(CONFIG_MSM_RMNET_WWAN) += msm_rmnet_wwan.o
 obj-$(CONFIG_MSM_RMNET_SDIO) += msm_rmnet_sdio.o
 obj-$(CONFIG_MSM_RMNET_BAM) += msm_rmnet_bam.o
 obj-$(CONFIG_MSM_RMNET_SMUX) += msm_rmnet_smux.o
 obj-$(CONFIG_QFEC) += qfec.o
+obj-$(CONFIG_ECM_IPA) += ecm_ipa.o
diff --git a/drivers/net/ethernet/msm/ecm_ipa.c b/drivers/net/ethernet/msm/ecm_ipa.c
new file mode 100644
index 0000000..605fd84
--- /dev/null
+++ b/drivers/net/ethernet/msm/ecm_ipa.c
@@ -0,0 +1,1105 @@
+/* Copyright (c) 2013, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/debugfs.h>
+#include <linux/errno.h>
+#include <linux/etherdevice.h>
+#include <linux/fs.h>
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/skbuff.h>
+#include <linux/sched.h>
+#include <linux/spinlock.h>
+#include <mach/ecm_ipa.h>
+
+#define DRIVER_NAME "ecm_ipa"
+#define DRIVER_VERSION "19-Feb-2013"
+#define ECM_IPA_IPV4_HDR_NAME "ecm_eth_ipv4"
+#define ECM_IPA_IPV6_HDR_NAME "ecm_eth_ipv6"
+#define IPA_TO_USB_CLIENT	IPA_CLIENT_USB_CONS
+#define INACTIVITY_MSEC_DELAY 100
+#define ECM_IPA_ERROR(fmt, args...) \
+	pr_err(DRIVER_NAME "@%s@%d@ctx:%s: "\
+			fmt, __func__, __LINE__, current->comm, ## args)
+#ifdef ECM_IPA_DEBUG_ON
+#define ECM_IPA_DEBUG(fmt, args...) \
+	pr_err(DRIVER_NAME "@%s@%d@ctx:%s: "\
+			fmt, __func__, __LINE__, current->comm, ## args)
+#else /* ECM_IPA_DEBUG_ON */
+#define ECM_IPA_DEBUG(fmt, args...)
+#endif /* ECM_IPA_DEBUG_ON */
+
+#define NULL_CHECK(ptr) \
+	do { \
+		if (!(ptr)) { \
+			ECM_IPA_ERROR("null pointer %s\n", #ptr); \
+			return -EINVAL; \
+		} \
+	} \
+	while (0)
+
+#define ECM_IPA_LOG_ENTRY() ECM_IPA_DEBUG("begin\n")
+#define ECM_IPA_LOG_EXIT() ECM_IPA_DEBUG("end\n")
+
+/**
+ * struct ecm_ipa_dev - main driver context parameters
+ * @ack_spinlock: protect last sent skb
+ * @last_out_skb: last sent skb saved until Tx notify is received from IPA
+ * @net: network interface struct implemented by this driver
+ * @folder: debugfs folder for various debugging switches
+ * @tx_enable: flag that enables/disables the Tx path to continue to IPA
+ * @rx_enable: flag that enables/disables the Rx path to continue to IPA
+ * @rm_enable: flag that enables/disables Resource Manager requests prior to Tx
+ * @dma_enable: flag that allows on-the-fly DMA mode for IPA
+ * @tx_file: saved debugfs entry to allow cleanup
+ * @rx_file: saved debugfs entry to allow cleanup
+ * @rm_file: saved debugfs entry to allow cleanup
+ * @dma_file: saved debugfs entry to allow cleanup
+ * @eth_ipv4_hdr_hdl: saved handle for ipv4 header-insertion table
+ * @eth_ipv6_hdr_hdl: saved handle for ipv6 header-insertion table
+ * @usb_to_ipa_hdl: saved handle for IPA pipe operations
+ * @ipa_to_usb_hdl: saved handle for IPA pipe operations
+ */
+struct ecm_ipa_dev {
+	spinlock_t ack_spinlock;
+	struct sk_buff *last_out_skb;
+	struct net_device *net;
+	bool tx_enable;
+	bool rx_enable;
+	bool rm_enable;
+	bool dma_enable;
+	struct dentry *folder;
+	struct dentry *tx_file;
+	struct dentry *rx_file;
+	struct dentry *rm_file;
+	struct dentry *dma_file;
+	uint32_t eth_ipv4_hdr_hdl;
+	uint32_t eth_ipv6_hdr_hdl;
+	u32 usb_to_ipa_hdl;
+	u32 ipa_to_usb_hdl;
+};
+
+/**
+ * ecm_ipa_ctx - saved pointer to the std ecm network device context,
+ *                which allows ecm_ipa to be a singleton
+ */
+static struct ecm_ipa_dev *ecm_ipa_ctx;
+
+static int ecm_ipa_ep_registers_cfg(u32 usb_to_ipa_hdl, u32 ipa_to_usb_hdl);
+static void sk_buff_print(struct sk_buff *skb);
+static int ecm_ipa_set_device_ethernet_addr(
+	u8 *dev_ethaddr, u8 device_ethaddr[]);
+static void ecm_ipa_packet_receive_notify(void *priv,
+		enum ipa_dp_evt_type evt,
+		unsigned long data);
+static void ecm_ipa_tx_complete_notify(void *priv,
+		enum ipa_dp_evt_type evt,
+		unsigned long data);
+static int ecm_ipa_ep_registers_dma_cfg(u32 usb_to_ipa_hdl);
+static int ecm_ipa_open(struct net_device *net);
+static int ecm_ipa_stop(struct net_device *net);
+static netdev_tx_t ecm_ipa_start_xmit(struct sk_buff *skb,
+					struct net_device *net);
+static void ecm_ipa_rm_notify(void *user_data, enum ipa_rm_event event,
+		unsigned long data);
+static int ecm_ipa_create_rm_resource(struct ecm_ipa_dev *dev);
+static void ecm_ipa_destory_rm_resource(void);
+static bool rx_filter(struct sk_buff *skb);
+static bool tx_filter(struct sk_buff *skb);
+static bool rm_enabled(struct ecm_ipa_dev *dev);
+
+static int ecm_ipa_rules_cfg(struct ecm_ipa_dev *dev,
+		const void *dst_mac, const void *src_mac);
+static int ecm_ipa_register_tx(struct ecm_ipa_dev *dev);
+static void ecm_ipa_deregister_tx(struct ecm_ipa_dev *dev);
+static int ecm_ipa_debugfs_init(struct ecm_ipa_dev *dev);
+static void ecm_ipa_debugfs_destroy(struct ecm_ipa_dev *dev);
+static int ecm_ipa_debugfs_tx_open(struct inode *inode, struct file *file);
+static int ecm_ipa_debugfs_rx_open(struct inode *inode, struct file *file);
+static int ecm_ipa_debugfs_rm_open(struct inode *inode, struct file *file);
+static int ecm_ipa_debugfs_dma_open(struct inode *inode, struct file *file);
+static ssize_t ecm_ipa_debugfs_enable_read(struct file *file,
+		char __user *ubuf, size_t count, loff_t *ppos);
+static ssize_t ecm_ipa_debugfs_enable_write(struct file *file,
+		const char __user *buf, size_t count, loff_t *ppos);
+static ssize_t ecm_ipa_debugfs_enable_write_dma(struct file *file,
+		const char __user *buf, size_t count, loff_t *ppos);
+static void eth_get_drvinfo(struct net_device *net,
+		struct ethtool_drvinfo *drv_info);
+
+static const struct net_device_ops ecm_ipa_netdev_ops = {
+	.ndo_open		= ecm_ipa_open,
+	.ndo_stop		= ecm_ipa_stop,
+	.ndo_start_xmit = ecm_ipa_start_xmit,
+	.ndo_set_mac_address = eth_mac_addr,
+};
+static const struct ethtool_ops ops = {
+	.get_drvinfo = eth_get_drvinfo,
+	.get_link = ethtool_op_get_link,
+};
+const struct file_operations ecm_ipa_debugfs_tx_ops = {
+	.open = ecm_ipa_debugfs_tx_open,
+	.read = ecm_ipa_debugfs_enable_read,
+	.write = ecm_ipa_debugfs_enable_write,
+};
+const struct file_operations ecm_ipa_debugfs_rx_ops = {
+	.open = ecm_ipa_debugfs_rx_open,
+	.read = ecm_ipa_debugfs_enable_read,
+	.write = ecm_ipa_debugfs_enable_write,
+};
+const struct file_operations ecm_ipa_debugfs_rm_ops = {
+	.open = ecm_ipa_debugfs_rm_open,
+	.read = ecm_ipa_debugfs_enable_read,
+	.write = ecm_ipa_debugfs_enable_write,
+};
+const struct file_operations ecm_ipa_debugfs_dma_ops = {
+	.open = ecm_ipa_debugfs_dma_open,
+	.read = ecm_ipa_debugfs_enable_read,
+	.write = ecm_ipa_debugfs_enable_write_dma,
+};
+
+/**
+ * ecm_ipa_init() - initializes internal data structures
+ * @ecm_ipa_rx_dp_notify: supplied callback to be called by the IPA
+ * driver upon data packets received from USB pipe into IPA core.
+ * @ecm_ipa_tx_dp_notify: supplied callback to be called by the IPA
+ * driver upon exception packets sent from IPA pipe into USB core.
+ * @priv: should be passed later on to ecm_ipa_configure; holds the network
+ * structure allocated for the STD ECM interface.
+ *
+ * Shall be called prior to pipe connection.
+ * The out parameters (the callbacks) shall be supplied to ipa_connect.
+ * Detailed description:
+ *  - set the callbacks to be used by the caller upon ipa_connect
+ *  - allocate the network device
+ *  - set the priv argument with a reference to the network device
+ *
+ * Returns negative errno, or zero on success
+ */
+int ecm_ipa_init(ecm_ipa_callback *ecm_ipa_rx_dp_notify,
+		ecm_ipa_callback *ecm_ipa_tx_dp_notify,
+		void **priv)
+{
+	int ret = 0;
+	struct net_device *net;
+	struct ecm_ipa_dev *dev;
+	ECM_IPA_LOG_ENTRY();
+	ECM_IPA_DEBUG("%s version %s\n", DRIVER_NAME, DRIVER_VERSION);
+	NULL_CHECK(ecm_ipa_rx_dp_notify);
+	NULL_CHECK(ecm_ipa_tx_dp_notify);
+	NULL_CHECK(priv);
+	net = alloc_etherdev(sizeof(struct ecm_ipa_dev));
+	if (!net) {
+		ret = -ENOMEM;
+		ECM_IPA_ERROR("fail to allocate etherdev\n");
+		goto fail_alloc_etherdev;
+	}
+	ECM_IPA_DEBUG("etherdev was successfully allocated\n");
+	dev = netdev_priv(net);
+	memset(dev, 0, sizeof(*dev));
+	dev->tx_enable = true;
+	dev->rx_enable = true;
+	spin_lock_init(&dev->ack_spinlock);
+	dev->net = net;
+	ecm_ipa_ctx = dev;
+	*priv = (void *)dev;
+	snprintf(net->name, sizeof(net->name), "%s%%d", "ecm");
+	net->netdev_ops = &ecm_ipa_netdev_ops;
+	ECM_IPA_DEBUG("internal data structures were initialized\n");
+	ret = ecm_ipa_debugfs_init(dev);
+	if (ret)
+		goto fail_debugfs;
+	ECM_IPA_DEBUG("debugfs entries were created\n");
+	*ecm_ipa_rx_dp_notify = ecm_ipa_packet_receive_notify;
+	*ecm_ipa_tx_dp_notify = ecm_ipa_tx_complete_notify;
+	ECM_IPA_LOG_EXIT();
+	return 0;
+fail_debugfs:
+	free_netdev(net);
+fail_alloc_etherdev:
+	return ret;
+}
+EXPORT_SYMBOL(ecm_ipa_init);
+
+/**
+ * ecm_ipa_rules_cfg() - set header insertion and register Tx/Rx properties
+ *				Headers will be committed to HW
+ * @dev: main driver context parameters
+ * @dst_mac: destination MAC address
+ * @src_mac: source MAC address
+ *
+ * Returns negative errno, or zero on success
+ */
+static int ecm_ipa_rules_cfg(struct ecm_ipa_dev *dev,
+		const void *dst_mac, const void *src_mac)
+{
+	struct ipa_ioc_add_hdr *hdrs;
+	struct ipa_hdr_add *ipv4_hdr;
+	struct ipa_hdr_add *ipv6_hdr;
+	struct ethhdr *eth_ipv4;
+	struct ethhdr *eth_ipv6;
+	int result = 0;
+
+	ECM_IPA_LOG_ENTRY();
+	hdrs = kzalloc(sizeof(*hdrs) + sizeof(*ipv4_hdr) + sizeof(*ipv6_hdr),
+			GFP_KERNEL);
+	if (!hdrs) {
+		result = -ENOMEM;
+		goto out;
+	}
+	ipv4_hdr = &hdrs->hdr[0];
+	eth_ipv4 = (struct ethhdr *)ipv4_hdr->hdr;
+	ipv6_hdr = &hdrs->hdr[1];
+	eth_ipv6 = (struct ethhdr *)ipv6_hdr->hdr;
+	strlcpy(ipv4_hdr->name, ECM_IPA_IPV4_HDR_NAME, IPA_RESOURCE_NAME_MAX);
+	memcpy(eth_ipv4->h_dest, dst_mac, ETH_ALEN);
+	memcpy(eth_ipv4->h_source, src_mac, ETH_ALEN);
+	eth_ipv4->h_proto = ETH_P_IP;
+	ipv4_hdr->hdr_len = ETH_HLEN;
+	ipv4_hdr->is_partial = 0;
+	strlcpy(ipv6_hdr->name, ECM_IPA_IPV6_HDR_NAME, IPA_RESOURCE_NAME_MAX);
+	memcpy(eth_ipv6->h_dest, dst_mac, ETH_ALEN);
+	memcpy(eth_ipv6->h_source, src_mac, ETH_ALEN);
+	eth_ipv6->h_proto = ETH_P_IPV6;
+	ipv6_hdr->hdr_len = ETH_HLEN;
+	ipv6_hdr->is_partial = 0;
+	hdrs->commit = 1;
+	hdrs->num_hdrs = 2;
+	result = ipa_add_hdr(hdrs);
+	if (result) {
+		ECM_IPA_ERROR("Fail on Header-Insertion(%d)\n", result);
+		goto out_free_mem;
+	}
+	if (ipv4_hdr->status) {
+		ECM_IPA_ERROR("Fail on Header-Insertion ipv4(%d)\n",
+				ipv4_hdr->status);
+		result = ipv4_hdr->status;
+		goto out_free_mem;
+	}
+	if (ipv6_hdr->status) {
+		ECM_IPA_ERROR("Fail on Header-Insertion ipv6(%d)\n",
+				ipv6_hdr->status);
+		result = ipv6_hdr->status;
+		goto out_free_mem;
+	}
+	dev->eth_ipv4_hdr_hdl = ipv4_hdr->hdr_hdl;
+	dev->eth_ipv6_hdr_hdl = ipv6_hdr->hdr_hdl;
+	ECM_IPA_LOG_EXIT();
+out_free_mem:
+	kfree(hdrs);
+out:
+	return result;
+}
+
+static void ecm_ipa_rules_destroy(struct ecm_ipa_dev *dev)
+{
+	struct ipa_ioc_del_hdr *del_hdr;
+	struct ipa_hdr_del *ipv4;
+	struct ipa_hdr_del *ipv6;
+	int result;
+	del_hdr = kzalloc(sizeof(*del_hdr) + sizeof(*ipv4) +
+			sizeof(*ipv6), GFP_KERNEL);
+	if (!del_hdr)
+		return;
+	del_hdr->commit = 1;
+	del_hdr->num_hdls = 2;
+	ipv4 = &del_hdr->hdl[0];
+	ipv4->hdl = dev->eth_ipv4_hdr_hdl;
+	ipv6 = &del_hdr->hdl[1];
+	ipv6->hdl = dev->eth_ipv6_hdr_hdl;
+	result = ipa_del_hdr(del_hdr);
+	if (result || ipv4->status || ipv6->status)
+		ECM_IPA_ERROR("ipa_del_hdr failed");
+}
+
+static int ecm_ipa_register_tx(struct ecm_ipa_dev *dev)
+{
+	struct ipa_tx_intf tx_properties = {0};
+	struct ipa_ioc_tx_intf_prop properties[2] = { {0}, {0} };
+	struct ipa_ioc_tx_intf_prop *ipv4_property;
+	struct ipa_ioc_tx_intf_prop *ipv6_property;
+	int result = 0;
+	ECM_IPA_LOG_ENTRY();
+	tx_properties.prop = properties;
+	ipv4_property = &tx_properties.prop[0];
+	ipv4_property->ip = IPA_IP_v4;
+	ipv4_property->dst_pipe = IPA_TO_USB_CLIENT;
+	strlcpy(ipv4_property->hdr_name, ECM_IPA_IPV4_HDR_NAME,
+			IPA_RESOURCE_NAME_MAX);
+	ipv6_property = &tx_properties.prop[1];
+	ipv6_property->ip = IPA_IP_v6;
+	ipv6_property->dst_pipe = IPA_TO_USB_CLIENT;
+	strlcpy(ipv6_property->hdr_name, ECM_IPA_IPV6_HDR_NAME,
+			IPA_RESOURCE_NAME_MAX);
+	tx_properties.num_props = 2;
+	result = ipa_register_intf(dev->net->name, &tx_properties, NULL);
+	if (result)
+		ECM_IPA_ERROR("fail on Tx_prop registration\n");
+	ECM_IPA_LOG_EXIT();
+	return result;
+}
+
+static void ecm_ipa_deregister_tx(struct ecm_ipa_dev *dev)
+{
+	int result;
+	ECM_IPA_LOG_ENTRY();
+	result = ipa_deregister_intf(dev->net->name);
+	if (result)
+		ECM_IPA_DEBUG("Fail on Tx prop deregister\n");
+	ECM_IPA_LOG_EXIT();
+	return;
+}
+
+/**
+ * ecm_ipa_configure() - make IPA core end-point specific configuration
+ * @host_ethaddr: host Ethernet address in network order
+ * @device_ethaddr: device Ethernet address in network order
+ * @priv: the network-device context returned via ecm_ipa_init
+ *
+ * Configure the usb_to_ipa and ipa_to_usb end-point registers
+ * - USB->IPA end-point: disable de-aggregation, enable link layer
+ *   header removal (Ethernet removal), source NATing and default routing.
+ * - IPA->USB end-point: disable aggregation, add link layer header (Ethernet)
+ * - allocate Ethernet device
+ * - register to Linux network stack
+ *
+ * Returns negative errno, or zero on success
+ */
+int ecm_ipa_configure(u8 host_ethaddr[], u8 device_ethaddr[],
+		void *priv)
+{
+	struct ecm_ipa_dev *dev = priv;
+	struct net_device *net;
+	int result;
+	ECM_IPA_LOG_ENTRY();
+	NULL_CHECK(host_ethaddr);
+	NULL_CHECK(device_ethaddr);
+	NULL_CHECK(dev);
+	net = dev->net;
+	NULL_CHECK(net);
+	ECM_IPA_DEBUG("host_ethaddr=%pM device_ethaddr=%pM\n",
+					host_ethaddr, device_ethaddr);
+	result = ecm_ipa_create_rm_resource(dev);
+	if (result) {
+		ECM_IPA_ERROR("fail on RM create\n");
+		return -EINVAL;
+	}
+	ECM_IPA_DEBUG("RM resource was created\n");
+	netif_carrier_off(dev->net);
+	result = ecm_ipa_set_device_ethernet_addr(net->dev_addr,
+			device_ethaddr);
+	if (result) {
+		ECM_IPA_ERROR("set device MAC failed\n");
+		goto fail_set_device_ethernet;
+	}
+	result = ecm_ipa_rules_cfg(dev, host_ethaddr, device_ethaddr);
+	if (result) {
+		ECM_IPA_ERROR("fail on ipa rules set\n");
+		goto fail_set_device_ethernet;
+	}
+	ECM_IPA_DEBUG("Ethernet header insertion was set\n");
+	result = ecm_ipa_register_tx(dev);
+	if (result) {
+		ECM_IPA_ERROR("fail on properties set\n");
+		goto fail_register_tx;
+	}
+	ECM_IPA_DEBUG("ECM Tx properties were registered\n");
+	result = register_netdev(net);
+	if (result) {
+		ECM_IPA_ERROR("register_netdev failed: %d\n", result);
+		goto fail_register_netdev;
+	}
+	ECM_IPA_DEBUG("register_netdev succeeded\n");
+	ECM_IPA_LOG_EXIT();
+	return 0;
+fail_register_netdev:
+	ecm_ipa_deregister_tx(dev);
+fail_register_tx:
+fail_set_device_ethernet:
+	ecm_ipa_rules_destroy(dev);
+	ecm_ipa_destory_rm_resource();
+	free_netdev(net);
+	return result;
+}
+EXPORT_SYMBOL(ecm_ipa_configure);
+
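+/**
+ * ecm_ipa_connect() - notify ecm_ipa that the USB pipes are connected
+ * @usb_to_ipa_hdl: handle of the usb_to_ipa end-point for the IPA driver
+ * @ipa_to_usb_hdl: handle of the ipa_to_usb end-point for the IPA driver
+ * @priv: the network-device context returned via ecm_ipa_init
+ *
+ * Validates and stores the pipe handles, configures the end-point
+ * registers for both pipes and turns the carrier on so the network
+ * stack may start transmitting.
+ */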
+int ecm_ipa_connect(u32 usb_to_ipa_hdl, u32 ipa_to_usb_hdl,
+		void *priv)
+{
+	struct ecm_ipa_dev *dev = priv;
+	ECM_IPA_LOG_ENTRY();
+	NULL_CHECK(priv);
+	ECM_IPA_DEBUG("usb_to_ipa_hdl = %d, ipa_to_usb_hdl = %d\n",
+					usb_to_ipa_hdl, ipa_to_usb_hdl);
+	if (!usb_to_ipa_hdl || usb_to_ipa_hdl >= IPA_CLIENT_MAX) {
+		ECM_IPA_ERROR("usb_to_ipa_hdl(%d) is not a valid ipa handle\n",
+				usb_to_ipa_hdl);
+		return -EINVAL;
+	}
+	if (!ipa_to_usb_hdl || ipa_to_usb_hdl >= IPA_CLIENT_MAX) {
+		ECM_IPA_ERROR("ipa_to_usb_hdl(%d) is not a valid ipa handle\n",
+				ipa_to_usb_hdl);
+		return -EINVAL;
+	}
+	dev->ipa_to_usb_hdl = ipa_to_usb_hdl;
+	dev->usb_to_ipa_hdl = usb_to_ipa_hdl;
+	ecm_ipa_ep_registers_cfg(usb_to_ipa_hdl, ipa_to_usb_hdl);
+	netif_carrier_on(dev->net);
+	if (!netif_carrier_ok(dev->net)) {
+		ECM_IPA_ERROR("netif_carrier_ok error\n");
+		return -EBUSY;
+	}
+	ECM_IPA_LOG_EXIT();
+	return 0;
+}
+EXPORT_SYMBOL(ecm_ipa_connect);
+
+int ecm_ipa_disconnect(void *priv)
+{
+	struct ecm_ipa_dev *dev = priv;
+	ECM_IPA_LOG_ENTRY();
+	NULL_CHECK(dev);
+	netif_carrier_off(dev->net);
+	ECM_IPA_LOG_EXIT();
+	return 0;
+}
+EXPORT_SYMBOL(ecm_ipa_disconnect);
+
+
+static void ecm_ipa_rm_notify(void *user_data, enum ipa_rm_event event,
+		unsigned long data)
+{
+	struct ecm_ipa_dev *dev = user_data;
+	ECM_IPA_LOG_ENTRY();
+	if (event == IPA_RM_RESOURCE_GRANTED &&
+			netif_queue_stopped(dev->net)) {
+		ECM_IPA_DEBUG("Resource Granted - waking queue\n");
+		netif_wake_queue(dev->net);
+	} else {
+		ECM_IPA_DEBUG("Resource released\n");
+	}
+	ECM_IPA_LOG_EXIT();
+}
+
+static int ecm_ipa_create_rm_resource(struct ecm_ipa_dev *dev)
+{
+	struct ipa_rm_create_params create_params = {0};
+	int result;
+	ECM_IPA_LOG_ENTRY();
+	create_params.name = IPA_RM_RESOURCE_STD_ECM_PROD;
+	create_params.reg_params.user_data = dev;
+	create_params.reg_params.notify_cb = ecm_ipa_rm_notify;
+	result = ipa_rm_create_resource(&create_params);
+	if (result) {
+		ECM_IPA_ERROR("Fail on ipa_rm_create_resource\n");
+		goto fail_rm_create;
+	}
+	ECM_IPA_DEBUG("rm client was created");
+
+	result = ipa_rm_inactivity_timer_init(IPA_RM_RESOURCE_STD_ECM_PROD,
+			INACTIVITY_MSEC_DELAY);
+	if (result) {
+		ECM_IPA_ERROR("Fail on ipa_rm_inactivity_timer_init\n");
+		goto fail_it;
+	}
+	ECM_IPA_DEBUG("rm_it client was created");
+	ECM_IPA_LOG_EXIT();
+	return 0;
+fail_it:
+fail_rm_create:
+	return result;
+}
+
+static void ecm_ipa_destory_rm_resource(void)
+{
+	ECM_IPA_LOG_ENTRY();
+	ipa_rm_inactivity_timer_destroy(IPA_RM_RESOURCE_STD_ECM_PROD);
+	ECM_IPA_LOG_EXIT();
+}
+
+static bool rx_filter(struct sk_buff *skb)
+{
+	struct ecm_ipa_dev *dev = netdev_priv(skb->dev);
+	return !dev->rx_enable;
+}
+
+static bool tx_filter(struct sk_buff *skb)
+{
+	struct ecm_ipa_dev *dev = netdev_priv(skb->dev);
+	return !dev->tx_enable;
+}
+
+static bool rm_enabled(struct ecm_ipa_dev *dev)
+{
+	return dev->rm_enable;
+}
+
+static int ecm_ipa_open(struct net_device *net)
+{
+	ECM_IPA_LOG_ENTRY();
+	netif_start_queue(net);
+	ECM_IPA_LOG_EXIT();
+	return 0;
+}
+
+static int ecm_ipa_stop(struct net_device *net)
+{
+	ECM_IPA_LOG_ENTRY();
+	ECM_IPA_DEBUG("stopping net device\n");
+	netif_stop_queue(net);
+	ECM_IPA_LOG_EXIT();
+	return 0;
+}
+
+/**
+ * ecm_ipa_cleanup() - destroys all ecm information
+ * @priv: main driver context parameters
+ *
+ */
+void ecm_ipa_cleanup(void *priv)
+{
+	struct ecm_ipa_dev *dev = priv;
+	ECM_IPA_LOG_ENTRY();
+	if (!dev) {
+		ECM_IPA_ERROR("dev NULL pointer\n");
+		return;
+	}
+	if (rm_enabled(dev)) {
+		ecm_ipa_destory_rm_resource();
+		ecm_ipa_debugfs_destroy(dev);
+	}
+	if (dev->net) {
+		unregister_netdev(dev->net);
+		free_netdev(dev->net);
+	}
+	ECM_IPA_DEBUG("cleanup done\n");
+	ecm_ipa_ctx = NULL;
+	ECM_IPA_LOG_EXIT();
+	return ;
+}
+EXPORT_SYMBOL(ecm_ipa_cleanup);
+
+static int resource_request(struct ecm_ipa_dev *dev)
+{
+	int result = 0;
+	ECM_IPA_LOG_ENTRY();
+	if (!rm_enabled(dev))
+		goto out;
+	result = ipa_rm_inactivity_timer_request_resource(
+			IPA_RM_RESOURCE_STD_ECM_PROD);
+out:
+	ECM_IPA_LOG_EXIT();
+	return result;
+}
+
+static void resource_release(struct ecm_ipa_dev *dev)
+{
+	ECM_IPA_LOG_ENTRY();
+	if (!rm_enabled(dev))
+		goto out;
+	ipa_rm_inactivity_timer_release_resource(IPA_RM_RESOURCE_STD_ECM_PROD);
+out:
+	ECM_IPA_LOG_EXIT();
+}
+
+/**
+ * ecm_ipa_start_xmit() - send data from APPs to USB core via IPA core
+ * @skb: packet received from Linux stack
+ * @net: the network device being used to send this packet
+ *
+ * Several conditions are needed in order to send the packet to IPA:
+ * - we are in a valid state where the queue is not stopped
+ * - Filter Tx switch is turned off
+ * - The resources required for actual Tx are all up
+ *
+ */
+static netdev_tx_t ecm_ipa_start_xmit(struct sk_buff *skb,
+					struct net_device *net)
+{
+	int ret;
+	netdev_tx_t status = NETDEV_TX_BUSY;
+	struct ecm_ipa_dev *dev = netdev_priv(net);
+	unsigned long flags;
+	ECM_IPA_LOG_ENTRY();
+	if (unlikely(netif_queue_stopped(net))) {
+		ECM_IPA_ERROR("interface queue is stopped\n");
+		goto out;
+	}
+	ECM_IPA_DEBUG("send (proto=0x%04x)\n", ntohs(skb->protocol));
+	if (unlikely(tx_filter(skb))) {
+		dev_kfree_skb_any(skb);
+		ECM_IPA_ERROR("packet got filtered out on Tx path\n");
+		status = NETDEV_TX_OK;
+		goto out;
+	}
+	ret = resource_request(dev);
+	if (ret) {
+		ECM_IPA_DEBUG("Waiting for resource\n");
+		netif_stop_queue(net);
+		goto resource_busy;
+	}
+	ECM_IPA_DEBUG("taking ack_lock\n");
+	spin_lock_irqsave(&dev->ack_spinlock, flags);
+	ECM_IPA_DEBUG("ack_lock taken\n");
+	if (dev->last_out_skb) {
+		ECM_IPA_DEBUG("No Tx-ack received for previous packet\n");
+		ECM_IPA_DEBUG("releasing ack_lock\n");
+		spin_unlock_irqrestore(&dev->ack_spinlock, flags);
+		ECM_IPA_DEBUG("ack_lock released\n");
+		netif_stop_queue(net);
+		status = NETDEV_TX_BUSY;
+		goto out;
+	} else {
+		dev->last_out_skb = skb;
+	}
+	ECM_IPA_DEBUG("releasing ack_lock\n");
+	spin_unlock_irqrestore(&dev->ack_spinlock, flags);
+	ECM_IPA_DEBUG("ack_lock released\n");
+	sk_buff_print(skb);
+	ECM_IPA_DEBUG("ipa_tx_dp is called (dst_client=%d)\n",
+			IPA_TO_USB_CLIENT);
+	ret = ipa_tx_dp(IPA_TO_USB_CLIENT, skb, NULL);
+	if (ret) {
+		ECM_IPA_ERROR("ipa transmit failed (%d)\n", ret);
+		goto fail_tx_packet;
+	}
+	net->stats.tx_packets++;
+	net->stats.tx_bytes += skb->len;
+	ECM_IPA_LOG_EXIT();
+	status = NETDEV_TX_OK;
+	goto out;
+fail_tx_packet:
+out:
+	resource_release(dev);
+resource_busy:
+	ECM_IPA_LOG_EXIT();
+	return status;
+}
+
+/**
+ * ecm_ipa_packet_receive_notify() - Rx notify
+ *
+ * @priv: ecm driver context
+ * @evt: event type
+ * @data: data provided with event
+ *
+ * IPA will pass a packet with skb->data pointing to Ethernet packet frame
+ */
+void ecm_ipa_packet_receive_notify(void *priv,
+		enum ipa_dp_evt_type evt,
+		unsigned long data)
+{
+	struct sk_buff *skb = (struct sk_buff *)data;
+	struct ecm_ipa_dev *dev = priv;
+	int result;
+	ECM_IPA_LOG_ENTRY();
+	if (evt != IPA_RECEIVE)	{
+		ECM_IPA_ERROR("A non-IPA_RECEIVE event in ecm_ipa_receive\n");
+		return;
+	}
+	ECM_IPA_DEBUG("receive\n");
+	sk_buff_print(skb);
+	skb->dev = dev->net;
+	skb->protocol = eth_type_trans(skb, dev->net);
+	if (rx_filter(skb)) {
+		ECM_IPA_ERROR("packet got filtered out on Rx path\n");
+		dev_kfree_skb_any(skb);
+		return;
+	}
+	ECM_IPA_DEBUG("kernel stack Rx is called\n");
+	result = netif_rx(skb);
+	if (result)
+		ECM_IPA_ERROR("fail on netif_rx\n");
+	dev->net->stats.rx_packets++;
+	dev->net->stats.rx_bytes += skb->len;
+	ECM_IPA_LOG_EXIT();
+	return;
+}
+
+/**
+ * ecm_ipa_tx_complete_notify() - Tx complete notify
+ *
+ * @priv: ecm driver context
+ * @evt: event type
+ * @data: data provided with event
+ *
+ * Check that the packet is the one we sent and release it
+ * This function will be called in deferred context in IPA wq.
+ */
+void ecm_ipa_tx_complete_notify(void *priv,
+		enum ipa_dp_evt_type evt,
+		unsigned long data)
+{
+	struct sk_buff *skb = (struct sk_buff *)data;
+	struct ecm_ipa_dev *dev = priv;
+	unsigned long flags;
+	ECM_IPA_LOG_ENTRY();
+
+	if (!dev) {
+		ECM_IPA_ERROR("dev is NULL pointer\n");
+		return;
+	}
+	if (evt != IPA_WRITE_DONE) {
+		ECM_IPA_ERROR("unsupported event on Tx callback\n");
+		return;
+	}
+	ECM_IPA_DEBUG("taking ack_lock\n");
+	spin_lock_irqsave(&dev->ack_spinlock, flags);
+	ECM_IPA_DEBUG("ack_lock taken\n");
+	if (skb != dev->last_out_skb)
+		ECM_IPA_ERROR("ACKed/Sent not the same(FIFO expected)\n");
+	dev->last_out_skb = NULL;
+	ECM_IPA_DEBUG("releasing ack_lock\n");
+	spin_unlock_irqrestore(&dev->ack_spinlock, flags);
+	ECM_IPA_DEBUG("ack_lock released\n");
+	if (netif_queue_stopped(dev->net)) {
+		ECM_IPA_DEBUG("waking up queue\n");
+		netif_wake_queue(dev->net);
+	}
+	dev_kfree_skb_any(skb);
+	ECM_IPA_LOG_EXIT();
+	return;
+}
+
+static int ecm_ipa_debugfs_tx_open(struct inode *inode, struct file *file)
+{
+	struct ecm_ipa_dev *dev = inode->i_private;
+	ECM_IPA_LOG_ENTRY();
+	file->private_data = &(dev->tx_enable);
+	ECM_IPA_LOG_EXIT();
+	return 0;
+}
+
+static int ecm_ipa_debugfs_rx_open(struct inode *inode, struct file *file)
+{
+	struct ecm_ipa_dev *dev = inode->i_private;
+	ECM_IPA_LOG_ENTRY();
+	file->private_data = &(dev->rx_enable);
+	ECM_IPA_LOG_EXIT();
+	return 0;
+}
+
+static int ecm_ipa_debugfs_rm_open(struct inode *inode, struct file *file)
+{
+	struct ecm_ipa_dev *dev = inode->i_private;
+	ECM_IPA_LOG_ENTRY();
+	file->private_data = &(dev->rm_enable);
+	ECM_IPA_LOG_EXIT();
+	return 0;
+}
+
+static ssize_t ecm_ipa_debugfs_enable_write_dma(struct file *file,
+		const char __user *buf, size_t count, loff_t *ppos)
+{
+	struct ecm_ipa_dev *dev = file->private_data;
+	int result;
+	ECM_IPA_LOG_ENTRY();
+	file->private_data = &dev->dma_enable;
+	result = ecm_ipa_debugfs_enable_write(file, buf, count, ppos);
+	if (dev->dma_enable)
+		ecm_ipa_ep_registers_dma_cfg(dev->usb_to_ipa_hdl);
+	else
+		ecm_ipa_ep_registers_cfg(dev->usb_to_ipa_hdl,
+				dev->ipa_to_usb_hdl);
+	ECM_IPA_LOG_EXIT();
+	return result;
+}
+
+static int ecm_ipa_debugfs_dma_open(struct inode *inode, struct file *file)
+{
+	struct ecm_ipa_dev *dev = inode->i_private;
+	ECM_IPA_LOG_ENTRY();
+	file->private_data = dev;
+	ECM_IPA_LOG_EXIT();
+	return 0;
+}
+
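+/*
+ * Shared debugfs write handler for the tx/rx/rm enable switches (and,
+ * via the dma wrapper, for dma_enable).  The file expects a single
+ * ASCII digit followed by a newline, e.g. writing "0\n" to
+ * /sys/kernel/debug/ecm_ipa/tx_enable disables the Tx path.
+ */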
+static ssize_t ecm_ipa_debugfs_enable_write(struct file *file,
+		const char __user *buf, size_t count, loff_t *ppos)
+{
+	unsigned long missing;
+	char input;
+	bool *enable = file->private_data;
+	if (count != sizeof(input) + 1) {
+		ECM_IPA_ERROR("wrong input length(%zd)\n", count);
+		return -EINVAL;
+	}
+	if (!buf) {
+		ECM_IPA_ERROR("Bad argument\n");
+		return -EINVAL;
+	}
+	missing = copy_from_user(&input, buf, 1);
+	if (missing)
+		return -EFAULT;
+	ECM_IPA_DEBUG("input received %c\n", input);
+	*enable = input - '0';
+	ECM_IPA_DEBUG("value was set to %d\n", *enable);
+	return count;
+}
+
+static ssize_t ecm_ipa_debugfs_enable_read(struct file *file,
+		char __user *ubuf, size_t count, loff_t *ppos)
+{
+	int nbytes;
+	int size = 0;
+	int ret;
+	loff_t pos;
+	u8 enable_str[sizeof(char)*3] = {0};
+	bool *enable = file->private_data;
+	pos = *ppos;
+	nbytes = scnprintf(enable_str, sizeof(enable_str), "%d\n", *enable);
+	ret = simple_read_from_buffer(ubuf, count, ppos, enable_str, nbytes);
+	if (ret < 0) {
+		ECM_IPA_ERROR("simple_read_from_buffer problem");
+		return ret;
+	}
+	size += ret;
+	count -= nbytes;
+	*ppos = pos + size;
+	return size;
+}
+
+static int ecm_ipa_debugfs_init(struct ecm_ipa_dev *dev)
+{
+	const mode_t flags = S_IRUSR | S_IRGRP | S_IROTH |
+			S_IWUSR | S_IWGRP | S_IWOTH;
+	int ret = -EINVAL;
+	ECM_IPA_LOG_ENTRY();
+	if (!dev)
+		return -EINVAL;
+	dev->folder = debugfs_create_dir("ecm_ipa", NULL);
+	if (!dev->folder) {
+		ECM_IPA_ERROR("could not create debugfs folder entry\n");
+		ret = -EFAULT;
+		goto fail_folder;
+	}
+	dev->tx_file = debugfs_create_file("tx_enable", flags, dev->folder, dev,
+		   &ecm_ipa_debugfs_tx_ops);
+	if (!dev->tx_file) {
+		ECM_IPA_ERROR("could not create debugfs tx file\n");
+		ret = -EFAULT;
+		goto fail_file;
+	}
+	dev->rx_file = debugfs_create_file("rx_enable", flags, dev->folder, dev,
+			&ecm_ipa_debugfs_rx_ops);
+	if (!dev->rx_file) {
+		ECM_IPA_ERROR("could not create debugfs rx file\n");
+		ret = -EFAULT;
+		goto fail_file;
+	}
+	dev->rm_file = debugfs_create_file("rm_enable", flags, dev->folder, dev,
+			&ecm_ipa_debugfs_rm_ops);
+	if (!dev->rm_file) {
+		ECM_IPA_ERROR("could not create debugfs rm file\n");
+		ret = -EFAULT;
+		goto fail_file;
+	}
+	dev->dma_file = debugfs_create_file("dma_enable", flags, dev->folder,
+			dev, &ecm_ipa_debugfs_dma_ops);
+	if (!dev->dma_file) {
+		ECM_IPA_ERROR("could not create debugfs dma file\n");
+		ret = -EFAULT;
+		goto fail_file;
+	}
+	ECM_IPA_LOG_EXIT();
+	return 0;
+fail_file:
+	debugfs_remove_recursive(dev->folder);
+fail_folder:
+	return ret;
+}
+
+static void ecm_ipa_debugfs_destroy(struct ecm_ipa_dev *dev)
+{
+	debugfs_remove_recursive(dev->folder);
+}
+
+static void eth_get_drvinfo(struct net_device *net,
+		struct ethtool_drvinfo *drv_info)
+{
+	ECM_IPA_LOG_ENTRY();
+	strlcpy(drv_info->driver, DRIVER_NAME, sizeof(drv_info->driver));
+	strlcpy(drv_info->version, DRIVER_VERSION, sizeof(drv_info->version));
+	ECM_IPA_LOG_EXIT();
+}
+
+
+/**
+ * ecm_ipa_ep_registers_cfg() - configure the USB endpoints for ECM
+ *
+ * @usb_to_ipa_hdl: handle received from ipa_connect
+ * @ipa_to_usb_hdl: handle received from ipa_connect
+ *
+ * USB to IPA pipe:
+ *  - No de-aggregation
+ *  - Remove Ethernet header
+ *  - SRC NAT
+ *  - Default routing(0)
+ * IPA to USB Pipe:
+ *  - No aggregation
+ *  - Add Ethernet header
+ */
+int ecm_ipa_ep_registers_cfg(u32 usb_to_ipa_hdl, u32 ipa_to_usb_hdl)
+{
+	int result = 0;
+	struct ipa_ep_cfg usb_to_ipa_ep_cfg;
+	struct ipa_ep_cfg ipa_to_usb_ep_cfg;
+	ECM_IPA_LOG_ENTRY();
+	memset(&usb_to_ipa_ep_cfg, 0 , sizeof(struct ipa_ep_cfg));
+	usb_to_ipa_ep_cfg.aggr.aggr_en = IPA_BYPASS_AGGR;
+	usb_to_ipa_ep_cfg.hdr.hdr_len = ETH_HLEN;
+	usb_to_ipa_ep_cfg.nat.nat_en = IPA_SRC_NAT;
+	usb_to_ipa_ep_cfg.route.rt_tbl_hdl = 0;
+	usb_to_ipa_ep_cfg.mode.dst = IPA_CLIENT_A5_LAN_WAN_CONS;
+	usb_to_ipa_ep_cfg.mode.mode = IPA_BASIC;
+	result = ipa_cfg_ep(usb_to_ipa_hdl, &usb_to_ipa_ep_cfg);
+	if (result) {
+		ECM_IPA_ERROR("failed to configure USB to IPA end-point\n");
+		goto out;
+	}
+	memset(&ipa_to_usb_ep_cfg, 0 , sizeof(struct ipa_ep_cfg));
+	ipa_to_usb_ep_cfg.aggr.aggr_en = IPA_BYPASS_AGGR;
+	ipa_to_usb_ep_cfg.hdr.hdr_len = ETH_HLEN;
+	ipa_to_usb_ep_cfg.nat.nat_en = IPA_BYPASS_NAT;
+	result = ipa_cfg_ep(ipa_to_usb_hdl, &ipa_to_usb_ep_cfg);
+	if (result) {
+		ECM_IPA_ERROR("failed to configure IPA to USB end-point\n");
+		goto out;
+	}
+	ECM_IPA_DEBUG("end-point registers successfully configured\n");
+out:
+	ECM_IPA_LOG_EXIT();
+	return result;
+}
+
+/**
+ * ecm_ipa_ep_registers_dma_cfg() - configure the USB endpoints for ECM
+ *	DMA
+ * @usb_to_ipa_hdl: handle received from ipa_connect
+ *
+ * This function will override the previous configuration
+ * which is needed for cores that do not support blocks logic
+ * Note that client handles are the actual pipe index
+ */
+int ecm_ipa_ep_registers_dma_cfg(u32 usb_to_ipa_hdl)
+{
+	int result = 0;
+	struct ipa_ep_cfg_mode cfg_mode;
+	u32 apps_to_ipa_hdl = 2;
+	ECM_IPA_LOG_ENTRY();
+	/* Apps to IPA - override the configuration made by IPA driver
+	 * in order to allow data path on older platforms */
+	memset(&cfg_mode, 0 , sizeof(cfg_mode));
+	cfg_mode.mode = IPA_DMA;
+	cfg_mode.dst = IPA_CLIENT_USB_CONS;
+	result = ipa_cfg_ep_mode(apps_to_ipa_hdl, &cfg_mode);
+	if (result) {
+		ECM_IPA_ERROR("failed to configure Apps to IPA\n");
+		goto out;
+	}
+	memset(&cfg_mode, 0 , sizeof(cfg_mode));
+	cfg_mode.mode = IPA_DMA;
+	cfg_mode.dst = IPA_CLIENT_A5_LAN_WAN_CONS;
+	result = ipa_cfg_ep_mode(usb_to_ipa_hdl, &cfg_mode);
+	if (result) {
+		ECM_IPA_ERROR("failed to configure USB to IPA\n");
+		goto out;
+	}
+	ECM_IPA_DEBUG("end-point registers successfully configured\n");
+out:
+	ECM_IPA_LOG_EXIT();
+	return result;
+}
+
+static void ecm_ipa_dump_buff(u8 *buff, u32 byte_size)
+{
+	int i;
+	ECM_IPA_DEBUG("ofst(hex), addr(hex), data(hex), value(char):\n");
+	for (i = 0 ; i < byte_size; i += 4) {
+		ECM_IPA_DEBUG("%2x  %p   %02x %02x %02x %02x | %c %c %c %c\n",
+				i, &buff[i],
+				buff[i], buff[i+1], buff[i+2], buff[i+3],
+				buff[i], buff[i+1], buff[i+2], buff[i+3]);
+	}
+}
+
+/**
+ * sk_buff_print() - detailed sk_buff printouts
+ * @skb: the socket buff
+ */
+void sk_buff_print(struct sk_buff *skb)
+{
+	ECM_IPA_DEBUG("called by: %s\n", current->comm);
+	ECM_IPA_DEBUG("skb->next=0x%p, skb->prev=0x%p, skb->sk=0x%p\n",
+			skb->next, skb->prev, skb->sk);
+	ECM_IPA_DEBUG("skb->len=0x%x, skb->data_len=0x%x protocol=0x%04x\n",
+				skb->len, skb->data_len, skb->protocol);
+	ECM_IPA_DEBUG("skb->mac_len=0x%x, skb->hdr_len=0x%x, skb->csum=%x\n",
+			skb->mac_len, skb->hdr_len, skb->csum);
+
+	ECM_IPA_DEBUG("mac_header = 0x%p\n", skb_mac_header(skb));
+	ECM_IPA_DEBUG("network_header = 0x%p\n", skb_network_header(skb));
+	ECM_IPA_DEBUG("transport_header=0x%p\n", skb_transport_header(skb));
+
+	ECM_IPA_DEBUG("skb->head=0x%p\n", skb->head);
+	ECM_IPA_DEBUG("skb->data=0x%p\n", skb->data);
+	ECM_IPA_DEBUG("tail=0x%p\n", skb_tail_pointer(skb));
+	ECM_IPA_DEBUG("end =0x%p\n", skb_end_pointer(skb));
+	ECM_IPA_DEBUG("skb->truesize=0x%x (buffer size)\n",
+				skb->truesize);
+	ecm_ipa_dump_buff(skb->data, skb->len);
+}
+
+/**
+ * ecm_ipa_set_device_ethernet_addr() - set device ethernet address
+ * @dev_ethaddr: device ethernet address field to be filled
+ * @device_ethaddr: address to be copied into @dev_ethaddr
+ *
+ * Returns 0 for success, negative otherwise
+ */
+int ecm_ipa_set_device_ethernet_addr(u8 *dev_ethaddr, u8 device_ethaddr[])
+{
+	if (!is_valid_ether_addr(device_ethaddr))
+		return -EINVAL;
+	memcpy(dev_ethaddr, device_ethaddr, ETH_ALEN);
+	ECM_IPA_DEBUG("device ethernet address: %pM\n", dev_ethaddr);
+	return 0;
+}
+
+/**
+ * ecm_ipa_init_module() - module initialization
+ *
+ */
+static int ecm_ipa_init_module(void)
+{
+	ECM_IPA_LOG_ENTRY();
+	ECM_IPA_LOG_EXIT();
+	return 0;
+}
+
+/**
+ * ecm_ipa_cleanup_module() - module cleanup
+ *
+ */
+static void ecm_ipa_cleanup_module(void)
+{
+	ECM_IPA_LOG_ENTRY();
+	ECM_IPA_LOG_EXIT();
+	return;
+}
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("ECM IPA network interface");
+
+late_initcall(ecm_ipa_init_module);
+module_exit(ecm_ipa_cleanup_module);
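Taken together, the ecm_ipa entry points above follow a fixed order: ecm_ipa_init() hands back the Rx/Tx data-path callbacks, the USB/IPA glue connects the pipes using those callbacks, then ecm_ipa_configure() and ecm_ipa_connect() bring the interface up. A minimal sketch from the calling (USB ECM function driver) side; the example_ name and the handle plumbing are illustrative only, since the real handles come from the IPA pipe setup:

	static int example_ecm_bringup(u8 host_mac[ETH_ALEN],
				       u8 device_mac[ETH_ALEN])
	{
		ecm_ipa_callback rx_notify;
		ecm_ipa_callback tx_notify;
		void *ecm_priv;
		u32 usb_to_ipa_hdl = 0, ipa_to_usb_hdl = 0;
		int ret;

		ret = ecm_ipa_init(&rx_notify, &tx_notify, &ecm_priv);
		if (ret)
			return ret;

		/*
		 * Connect the USB <-> IPA pipes here, passing rx_notify and
		 * tx_notify to the pipe setup and filling in the two handles.
		 */

		ret = ecm_ipa_configure(host_mac, device_mac, ecm_priv);
		if (ret)
			return ret;

		return ecm_ipa_connect(usb_to_ipa_hdl, ipa_to_usb_hdl, ecm_priv);
	}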
diff --git a/drivers/net/ethernet/msm/msm_rmnet_wwan.c b/drivers/net/ethernet/msm/msm_rmnet_wwan.c
new file mode 100644
index 0000000..fe1ac46
--- /dev/null
+++ b/drivers/net/ethernet/msm/msm_rmnet_wwan.c
@@ -0,0 +1,736 @@
+/* Copyright (c) 2013, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+/*
+ * WWAN Network Interface.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/delay.h>
+#include <linux/errno.h>
+#include <linux/interrupt.h>
+#include <linux/init.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/wakelock.h>
+#include <linux/msm_rmnet.h>
+#include <linux/if_arp.h>
+#include <linux/platform_device.h>
+#include <net/pkt_sched.h>
+#include <linux/workqueue.h>
+#include <linux/completion.h>
+#include <mach/ipa.h>
+
+#define WWAN_DEV_NAME "rmnet%d"
+#define WWAN_METADATA_MASK 0x00FF0000
+#define IPA_RM_INACTIVITY_TIMER 1000
+#define WWAN_DEVICE_COUNT (8)
+#define WWAN_DATA_LEN 2000
+#define HEADROOM_FOR_A2_MUX   8 /* for mux header */
+#define TAILROOM              8 /* for padding by mux layer */
+
+enum wwan_device_status {
+	WWAN_DEVICE_INACTIVE = 0,
+	WWAN_DEVICE_ACTIVE   = 1
+};
+static enum ipa_rm_resource_name
+	ipa_rm_resource_by_ch_id[WWAN_DEVICE_COUNT] = {
+	IPA_RM_RESOURCE_WWAN_0_PROD,
+	IPA_RM_RESOURCE_WWAN_1_PROD,
+	IPA_RM_RESOURCE_WWAN_2_PROD,
+	IPA_RM_RESOURCE_WWAN_3_PROD,
+	IPA_RM_RESOURCE_WWAN_4_PROD,
+	IPA_RM_RESOURCE_WWAN_5_PROD,
+	IPA_RM_RESOURCE_WWAN_6_PROD,
+	IPA_RM_RESOURCE_WWAN_7_PROD
+};
+static enum a2_mux_logical_channel_id
+	a2_mux_lcid_by_ch_id[WWAN_DEVICE_COUNT] = {
+	A2_MUX_WWAN_0,
+	A2_MUX_WWAN_1,
+	A2_MUX_WWAN_2,
+	A2_MUX_WWAN_3,
+	A2_MUX_WWAN_4,
+	A2_MUX_WWAN_5,
+	A2_MUX_WWAN_6,
+	A2_MUX_WWAN_7
+};
+
+/**
+ * struct wwan_private - WWAN private data
+ * @stats: iface statistics
+ * @ch_id: channel id
+ * @lock: spinlock for mutual exclusion
+ * @device_status: holds device status
+ *
+ * WWAN private - holds all relevant info about WWAN driver
+ */
+struct wwan_private {
+	struct net_device_stats stats;
+	uint32_t ch_id;
+	spinlock_t lock;
+	struct completion resource_granted_completion;
+	enum wwan_device_status device_status;
+};
+
+static struct net_device *netdevs[WWAN_DEVICE_COUNT];
+
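+/*
+ * wwan_ip_type_trans() - derive the L3 protocol of a received frame.
+ * The buffer is expected to start at the IP header, so the IP version
+ * nibble of the first byte selects ETH_P_IP vs ETH_P_IPV6.
+ */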
+static __be16 wwan_ip_type_trans(struct sk_buff *skb)
+{
+	__be16 protocol = 0;
+	/* Determine L3 protocol */
+	switch (skb->data[0] & 0xf0) {
+	case 0x40:
+		protocol = htons(ETH_P_IP);
+		break;
+	case 0x60:
+		protocol = htons(ETH_P_IPV6);
+		break;
+	default:
+		pr_err("[%s] %s() L3 protocol decode error: 0x%02x",
+		       skb->dev->name, __func__, skb->data[0] & 0xf0);
+		/* skb will be dropped in upper layer for unknown protocol */
+		break;
+	}
+	return protocol;
+}
+
+/**
+ * a2_mux_recv_notify() - Deliver an RX packet to network stack
+ *
+ * @skb: skb to be delivered
+ * @dev: network device
+ *
+ * Return codes:
+ * None
+ */
+static void a2_mux_recv_notify(void *dev, struct sk_buff *skb)
+{
+	struct wwan_private *wwan_ptr = netdev_priv(dev);
+
+	skb->dev = dev;
+	skb->protocol = wwan_ip_type_trans(skb);
+	wwan_ptr->stats.rx_packets++;
+	wwan_ptr->stats.rx_bytes += skb->len;
+	pr_debug("[%s] Rx packet #%lu len=%d\n",
+		skb->dev->name,
+		wwan_ptr->stats.rx_packets, skb->len);
+	netif_rx(skb);
+}
+
+/**
+ * wwan_send_packet() - Deliver a TX packet to A2 MUX driver.
+ *
+ * @skb: skb to be delivered
+ * @dev: network device
+ *
+ * Return codes:
+ * 0: success
+ * -EAGAIN: A2 MUX is not ready to send the skb. Try later
+ * -EFAULT: A2 MUX rejected the skb
+ * -EPERM: Unknown error
+ */
+static int wwan_send_packet(struct sk_buff *skb, struct net_device *dev)
+{
+	struct wwan_private *wwan_ptr = netdev_priv(dev);
+	int ret;
+
+	dev->trans_start = jiffies;
+	ret = a2_mux_write(a2_mux_lcid_by_ch_id[wwan_ptr->ch_id], skb);
+	if (ret != 0 && ret != -EAGAIN && ret != -EFAULT) {
+		pr_err("[%s] %s: write returned error %d",
+			dev->name, __func__, ret);
+		return -EPERM;
+	}
+	return ret;
+}
+
+/**
+ * a2_mux_write_done() - Update device statistics and wake the
+ * network stack queue if it was stopped and the A2 MUX queue is below
+ * the low watermark.
+ *
+ * @dev: network device
+ * @skb: skb to be delivered
+ *
+ * Return codes:
+ * None
+ */
+static void a2_mux_write_done(void *dev, struct sk_buff *skb)
+{
+	struct wwan_private *wwan_ptr = netdev_priv(dev);
+	unsigned long flags;
+
+	pr_debug("%s: write complete\n", __func__);
+	wwan_ptr->stats.tx_packets++;
+	wwan_ptr->stats.tx_bytes += skb->len;
+	pr_debug("[%s] Tx packet #%lu len=%d mark=0x%x\n",
+	    ((struct net_device *)(dev))->name, wwan_ptr->stats.tx_packets,
+	    skb->len, skb->mark);
+	dev_kfree_skb_any(skb);
+	spin_lock_irqsave(&wwan_ptr->lock, flags);
+	if (netif_queue_stopped(dev) &&
+	    a2_mux_is_ch_low(a2_mux_lcid_by_ch_id[wwan_ptr->ch_id])) {
+		pr_debug("%s: Low WM hit, waking queue=%p\n",
+		      __func__, skb);
+		netif_wake_queue(dev);
+	}
+	spin_unlock_irqrestore(&wwan_ptr->lock, flags);
+}
+
+/**
+ * a2_mux_notify() - Callback function for A2 MUX events Handles
+ * A2_MUX_RECEIVE and A2_MUX_WRITE_DONE events.
+ *
+ * @dev: network device
+ * @event: A2 MUX event
+ * @data: Additional data provided by A2 MUX
+ *
+ * Return codes:
+ * None
+ */
+static void a2_mux_notify(void *dev, enum a2_mux_event_type event,
+			  unsigned long data)
+{
+	struct sk_buff *skb = (struct sk_buff *)data;
+
+	switch (event) {
+	case A2_MUX_RECEIVE:
+		if (!skb) {
+			pr_err("[%s] %s: No skb received",
+			   ((struct net_device *)dev)->name, __func__);
+			return;
+		}
+		a2_mux_recv_notify(dev, skb);
+		break;
+	case A2_MUX_WRITE_DONE:
+		a2_mux_write_done(dev, skb);
+		break;
+	default:
+		pr_err("%s: unknown event %d\n", __func__, event);
+		break;
+	}
+}
+
+/**
+ * ipa_rm_resource_granted() - Called upon
+ * IPA_RM_RESOURCE_GRANTED event. Wakes up the queue if it was stopped.
+ *
+ * @dev: network device
+ *
+ * Return codes:
+ * None
+ */
+static void ipa_rm_resource_granted(void *dev)
+{
+	netif_wake_queue(dev);
+}
+/**
+ * ipa_rm_notify() - Callback function for RM events. Handles
+ * IPA_RM_RESOURCE_GRANTED and IPA_RM_RESOURCE_RELEASED events.
+ * IPA_RM_RESOURCE_GRANTED is handled in the context of shared
+ * workqueue.
+ *
+ * @dev: network device
+ * @event: IPA RM event
+ * @data: Additional data provided by IPA RM
+ *
+ * Return codes:
+ * None
+ */
+static void ipa_rm_notify(void *dev, enum ipa_rm_event event,
+			  unsigned long data)
+{
+	struct wwan_private *wwan_ptr = netdev_priv(dev);
+
+	pr_debug("%s: event %d\n", __func__, event);
+	switch (event) {
+	case IPA_RM_RESOURCE_GRANTED:
+		if (wwan_ptr->device_status == WWAN_DEVICE_INACTIVE) {
+			complete_all(&wwan_ptr->resource_granted_completion);
+			break;
+		}
+		ipa_rm_resource_granted(dev);
+		break;
+	case IPA_RM_RESOURCE_RELEASED:
+		break;
+	default:
+		pr_err("%s: unknown event %d\n", __func__, event);
+		break;
+	}
+}
+
+static int wwan_register_to_ipa(struct net_device *dev)
+{
+	struct wwan_private *wwan_ptr = netdev_priv(dev);
+	struct ipa_tx_intf tx_properties = {0};
+	struct ipa_ioc_tx_intf_prop tx_ioc_properties[2] = { {0}, {0} };
+	struct ipa_ioc_tx_intf_prop *tx_ipv4_property;
+	struct ipa_ioc_tx_intf_prop *tx_ipv6_property;
+	struct ipa_rx_intf rx_properties = {0};
+	struct ipa_ioc_rx_intf_prop rx_ioc_properties[2] = { {0}, {0} };
+	struct ipa_ioc_rx_intf_prop *rx_ipv4_property;
+	struct ipa_ioc_rx_intf_prop *rx_ipv6_property;
+	int ret = 0;
+
+	pr_debug("[%s] %s:\n", dev->name, __func__);
+	tx_properties.prop = tx_ioc_properties;
+	tx_ipv4_property = &tx_properties.prop[0];
+	tx_ipv4_property->ip = IPA_IP_v4;
+	tx_ipv4_property->dst_pipe = IPA_CLIENT_A2_EMBEDDED_CONS;
+	snprintf(tx_ipv4_property->hdr_name, IPA_RESOURCE_NAME_MAX, "%s%d",
+		 A2_MUX_HDR_NAME_V4_PREF,
+		 a2_mux_lcid_by_ch_id[wwan_ptr->ch_id]);
+	tx_ipv6_property = &tx_properties.prop[1];
+	tx_ipv6_property->ip = IPA_IP_v6;
+	tx_ipv6_property->dst_pipe = IPA_CLIENT_A2_EMBEDDED_CONS;
+	snprintf(tx_ipv6_property->hdr_name, IPA_RESOURCE_NAME_MAX, "%s%d",
+		 A2_MUX_HDR_NAME_V6_PREF,
+		 a2_mux_lcid_by_ch_id[wwan_ptr->ch_id]);
+	tx_properties.num_props = 2;
+	rx_properties.prop = rx_ioc_properties;
+	rx_ipv4_property = &rx_properties.prop[0];
+	rx_ipv4_property->ip = IPA_IP_v4;
+	rx_ipv4_property->attrib.attrib_mask |= IPA_FLT_META_DATA;
+	rx_ipv4_property->attrib.meta_data = wwan_ptr->ch_id;
+	rx_ipv4_property->attrib.meta_data_mask = WWAN_METADATA_MASK;
+	rx_ipv4_property->src_pipe = IPA_CLIENT_A2_EMBEDDED_PROD;
+	rx_ipv6_property = &rx_properties.prop[1];
+	rx_ipv6_property->ip = IPA_IP_v6;
+	rx_ipv6_property->attrib.attrib_mask |= IPA_FLT_META_DATA;
+	rx_ipv6_property->attrib.meta_data = wwan_ptr->ch_id;
+	rx_ipv6_property->attrib.meta_data_mask = WWAN_METADATA_MASK;
+	rx_ipv6_property->src_pipe = IPA_CLIENT_A2_EMBEDDED_PROD;
+	rx_properties.num_props = 2;
+	ret = ipa_register_intf(dev->name, &tx_properties, &rx_properties);
+	if (ret) {
+		pr_err("[%s] %s: ipa_register_intf failed %d\n", dev->name,
+		       __func__, ret);
+		return ret;
+	}
+	return 0;
+}
+
+static int __wwan_open(struct net_device *dev)
+{
+	int r;
+	struct wwan_private *wwan_ptr = netdev_priv(dev);
+
+	pr_debug("[%s] __wwan_open()\n", dev->name);
+	if (wwan_ptr->device_status != WWAN_DEVICE_ACTIVE) {
+		INIT_COMPLETION(wwan_ptr->resource_granted_completion);
+		r = ipa_rm_inactivity_timer_request_resource(
+			ipa_rm_resource_by_ch_id[wwan_ptr->ch_id]);
+		if (r < 0 && r != -EINPROGRESS) {
+			pr_err("%s: ipa rm timer request resource failed %d\n",
+					__func__, r);
+			return -ENODEV;
+		}
+		if (r == -EINPROGRESS) {
+			wait_for_completion(
+				&wwan_ptr->resource_granted_completion);
+		}
+		r = a2_mux_open_channel(a2_mux_lcid_by_ch_id[wwan_ptr->ch_id],
+					dev, a2_mux_notify);
+		if (r < 0) {
+			pr_err("%s: ch=%d failed with rc %d\n",
+					__func__, wwan_ptr->ch_id, r);
+			ipa_rm_inactivity_timer_release_resource(
+				ipa_rm_resource_by_ch_id[wwan_ptr->ch_id]);
+			return -ENODEV;
+		}
+		ipa_rm_inactivity_timer_release_resource(
+			ipa_rm_resource_by_ch_id[wwan_ptr->ch_id]);
+		r = wwan_register_to_ipa(dev);
+		if (r < 0) {
+			pr_err("%s: ch=%d failed to register to IPA rc %d\n",
+					__func__, wwan_ptr->ch_id, r);
+			return -ENODEV;
+		}
+	}
+	wwan_ptr->device_status = WWAN_DEVICE_ACTIVE;
+	return 0;
+}
+
+/**
+ * wwan_open() - Opens the wwan network interface. Opens logical
+ * channel on A2 MUX driver and starts the network stack queue
+ *
+ * @dev: network device
+ *
+ * Return codes:
+ * 0: success
+ * -ENODEV: Error while opening logical channel on A2 MUX driver
+ */
+static int wwan_open(struct net_device *dev)
+{
+	int rc = 0;
+
+	pr_debug("[%s] wwan_open()\n", dev->name);
+	rc = __wwan_open(dev);
+	if (rc == 0)
+		netif_start_queue(dev);
+	return rc;
+}
+
+
+static int __wwan_close(struct net_device *dev)
+{
+	struct wwan_private *wwan_ptr = netdev_priv(dev);
+	int rc = 0;
+
+	if (wwan_ptr->device_status == WWAN_DEVICE_ACTIVE) {
+		wwan_ptr->device_status = WWAN_DEVICE_INACTIVE;
+		/* do not close the wwan port once it is up; doing so causes
+		 * the remote side to hang if it is opened again */
+		INIT_COMPLETION(wwan_ptr->resource_granted_completion);
+		rc = ipa_rm_inactivity_timer_request_resource(
+			ipa_rm_resource_by_ch_id[wwan_ptr->ch_id]);
+		if (rc < 0 && rc != -EINPROGRESS) {
+			pr_err("%s: ipa rm timer request resource failed %d\n",
+					__func__, rc);
+			return -ENODEV;
+		}
+		if (rc == -EINPROGRESS) {
+			wait_for_completion(
+				&wwan_ptr->resource_granted_completion);
+		}
+		rc = a2_mux_close_channel(
+			a2_mux_lcid_by_ch_id[wwan_ptr->ch_id]);
+		if (rc) {
+			pr_err("[%s] %s: a2_mux_close_channel failed %d\n",
+			       dev->name, __func__, rc);
+			ipa_rm_inactivity_timer_release_resource(
+				ipa_rm_resource_by_ch_id[wwan_ptr->ch_id]);
+			return rc;
+		}
+		ipa_rm_inactivity_timer_release_resource(
+			ipa_rm_resource_by_ch_id[wwan_ptr->ch_id]);
+		rc = ipa_deregister_intf(dev->name);
+		if (rc) {
+			pr_err("[%s] %s: ipa_deregister_intf failed %d\n",
+			       dev->name, __func__, rc);
+			return rc;
+		}
+		return rc;
+	} else {
+		return -EBADF;
+	}
+}
+
+/**
+ * wwan_stop() - Stops the wwan network interface. Closes
+ * logical channel on A2 MUX driver and stops the network stack
+ * queue
+ *
+ * @dev: network device
+ *
+ * Return codes:
+ * 0: success
+ */
+static int wwan_stop(struct net_device *dev)
+{
+	pr_debug("[%s] wwan_stop()\n", dev->name);
+	__wwan_close(dev);
+	netif_stop_queue(dev);
+	return 0;
+}
+
+static int wwan_change_mtu(struct net_device *dev, int new_mtu)
+{
+	if (new_mtu < 0 || new_mtu > WWAN_DATA_LEN)
+		return -EINVAL;
+	pr_debug("[%s] MTU change: old=%d new=%d\n",
+		dev->name, dev->mtu, new_mtu);
+	dev->mtu = new_mtu;
+	return 0;
+}
+
+/**
+ * wwan_xmit() - Transmits an skb. Requests the needed IPA RM
+ * resources. If IPA RM is not ready, the skb is saved and
+ * transmitted as soon as the IPA RM resources are granted.
+ *
+ * @skb: skb to be transmitted
+ * @dev: network device
+ *
+ * Return codes:
+ * 0: success
+ * NETDEV_TX_BUSY: Error while transmitting the skb. Try again
+ * later
+ * -EFAULT: Error while transmitting the skb
+ */
+static int wwan_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+	struct wwan_private *wwan_ptr = netdev_priv(dev);
+	unsigned long flags;
+	int ret = 0;
+
+	if (netif_queue_stopped(dev)) {
+		pr_err("[%s]fatal: wwan_xmit called when netif_queue stopped\n",
+		       dev->name);
+		return 0;
+	}
+	ret = ipa_rm_inactivity_timer_request_resource(
+		ipa_rm_resource_by_ch_id[wwan_ptr->ch_id]);
+	if (ret == -EINPROGRESS) {
+		netif_stop_queue(dev);
+		return NETDEV_TX_BUSY;
+	}
+	if (ret) {
+		pr_err("[%s] fatal: ipa rm timer request resource failed %d\n",
+		       dev->name, ret);
+		return -EFAULT;
+	}
+	ret = wwan_send_packet(skb, dev);
+	if (ret == -EPERM) {
+		ret = NETDEV_TX_BUSY;
+		goto exit;
+	}
+	/*
+	 * detected SSR a bit early.  shut some things down now, and leave
+	 * the rest to the main ssr handling code when that happens later
+	 */
+	if (ret == -EFAULT) {
+		netif_carrier_off(dev);
+		dev_kfree_skb_any(skb);
+		ret = 0;
+		goto exit;
+	}
+	if (ret == -EAGAIN) {
+		/*
+		 * This should not happen.
+		 * EAGAIN means we attempted to overflow the high watermark.
+		 * Clearly the queue is not stopped like it should be, so
+		 * stop it and return BUSY to the TCP/IP framework.  It will
+		 * retry this packet when the queue is restarted, which happens
+		 * in the write_done callback when the low watermark is hit.
+		 */
+		netif_stop_queue(dev);
+		ret = NETDEV_TX_BUSY;
+		goto exit;
+	}
+	spin_lock_irqsave(&wwan_ptr->lock, flags);
+	if (a2_mux_is_ch_full(a2_mux_lcid_by_ch_id[wwan_ptr->ch_id])) {
+		netif_stop_queue(dev);
+		pr_debug("%s: High WM hit, stopping queue=%p\n",
+		       __func__, skb);
+	}
+	spin_unlock_irqrestore(&wwan_ptr->lock, flags);
+exit:
+	ipa_rm_inactivity_timer_release_resource(
+		ipa_rm_resource_by_ch_id[wwan_ptr->ch_id]);
+	return ret;
+}
+
+static struct net_device_stats *wwan_get_stats(struct net_device *dev)
+{
+	struct wwan_private *wwan_ptr = netdev_priv(dev);
+	return &wwan_ptr->stats;
+}
+
+static void wwan_tx_timeout(struct net_device *dev)
+{
+	pr_warning("[%s] wwan_tx_timeout()\n", dev->name);
+}
+
+/**
+ * wwan_ioctl() - I/O control for wwan network driver.
+ *
+ * @dev: network device
+ * @ifr: interface request; used to pass and return command data
+ * @cmd: command to be executed. Can be one of the RMNET_IOCTL_*
+ * commands, for example:
+ * RMNET_IOCTL_OPEN - Open the transport port
+ * RMNET_IOCTL_CLOSE - Close the transport port
+ *
+ * Return codes:
+ * 0: success
+ * -EINVAL: unsupported command
+ * other negative values: failure reported by the command handler
+ */
+static int wwan_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
+{
+	int rc = 0;
+
+	switch (cmd) {
+	case RMNET_IOCTL_SET_LLP_IP:        /* Set RAWIP protocol */
+		break;
+	case RMNET_IOCTL_GET_LLP:           /* Get link protocol state */
+		ifr->ifr_ifru.ifru_data = (void *) RMNET_MODE_LLP_IP;
+		break;
+	case RMNET_IOCTL_SET_QOS_DISABLE:   /* Set QoS header disabled */
+		break;
+	case RMNET_IOCTL_FLOW_ENABLE:
+		tc_qdisc_flow_control(dev, (u32)ifr->ifr_data, 1);
+		pr_debug("[%s] %s: enabled flow", dev->name, __func__);
+		break;
+	case RMNET_IOCTL_FLOW_DISABLE:
+		tc_qdisc_flow_control(dev, (u32)ifr->ifr_data, 0);
+		pr_debug("[%s] %s: disabled flow", dev->name, __func__);
+		break;
+	case RMNET_IOCTL_GET_QOS:           /* Get QoS header state    */
+		/* QoS disabled */
+		ifr->ifr_ifru.ifru_data = (void *) 0;
+		break;
+	case RMNET_IOCTL_GET_OPMODE:        /* Get operation mode      */
+		ifr->ifr_ifru.ifru_data = (void *) RMNET_MODE_LLP_IP;
+		break;
+	case RMNET_IOCTL_OPEN:  /* Open transport port */
+		rc = __wwan_open(dev);
+		pr_debug("[%s] wwan_ioctl(): open transport port\n",
+		     dev->name);
+		break;
+	case RMNET_IOCTL_CLOSE:  /* Close transport port */
+		rc = __wwan_close(dev);
+		pr_debug("[%s] wwan_ioctl(): close transport port\n",
+		     dev->name);
+		break;
+	default:
+		pr_err("[%s] error: wwan_ioctl called for unsupported cmd[%d]\n",
+		       dev->name, cmd);
+		return -EINVAL;
+	}
+	return rc;
+}
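
For reference, the RMNET_IOCTL_* commands handled above are private device ioctls, so a user-space caller issues them against the interface through an ordinary socket. A minimal sketch, assuming the RMNET_IOCTL_* definitions come from the platform's linux/msm_rmnet.h header; wwan_port_open and the "rmnet0" name are illustrative, not part of this patch:

#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/msm_rmnet.h>	/* assumed source of the RMNET_IOCTL_* values */

static int wwan_port_open(const char *ifname)	/* e.g. "rmnet0" */
{
	struct ifreq ifr;
	int fd, rc;

	fd = socket(AF_INET, SOCK_DGRAM, 0);
	if (fd < 0)
		return -1;
	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
	/* routed by the kernel to wwan_ioctl() above */
	rc = ioctl(fd, RMNET_IOCTL_OPEN, &ifr);
	close(fd);
	return rc;
}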
+
+static const struct net_device_ops wwan_ops_ip = {
+	.ndo_open = wwan_open,
+	.ndo_stop = wwan_stop,
+	.ndo_start_xmit = wwan_xmit,
+	.ndo_get_stats = wwan_get_stats,
+	.ndo_tx_timeout = wwan_tx_timeout,
+	.ndo_do_ioctl = wwan_ioctl,
+	.ndo_change_mtu = wwan_change_mtu,
+	.ndo_set_mac_address = 0,
+	.ndo_validate_addr = 0,
+};
+
+/**
+ * wwan_setup() - Sets up the wwan network device.
+ *
+ * @dev: network device
+ *
+ * Return codes:
+ * None
+ */
+static void wwan_setup(struct net_device *dev)
+{
+	dev->netdev_ops = &wwan_ops_ip;
+	ether_setup(dev);
+	/* set this after calling ether_setup */
+	dev->header_ops = 0;  /* No header */
+	dev->type = ARPHRD_RAWIP;
+	dev->hard_header_len = 0;
+	dev->mtu = WWAN_DATA_LEN;
+	dev->addr_len = 0;
+	dev->flags &= ~(IFF_BROADCAST | IFF_MULTICAST);
+	dev->needed_headroom = HEADROOM_FOR_A2_MUX;
+	dev->needed_tailroom = TAILROOM;
+	dev->watchdog_timeo = 1000;
+}
+
+/**
+ * wwan_init() - Initializes the module and registers the
+ * network interfaces with the network stack
+ *
+ * Return codes:
+ * 0: success
+ * -ENOMEM: No memory available
+ * -EFAULT: Internal error
+ */
+static int __init wwan_init(void)
+{
+	int ret;
+	struct net_device *dev;
+	struct wwan_private *wwan_ptr;
+	unsigned n;
+	struct ipa_rm_create_params ipa_rm_params;
+
+	pr_info("%s: WWAN devices[%d]\n", __func__, WWAN_DEVICE_COUNT);
+	for (n = 0; n < WWAN_DEVICE_COUNT; n++) {
+		dev = alloc_netdev(sizeof(struct wwan_private),
+				   WWAN_DEV_NAME, wwan_setup);
+		if (!dev) {
+			pr_err("%s: no memory for netdev %d\n", __func__, n);
+			ret = -ENOMEM;
+			goto fail;
+		}
+		netdevs[n] = dev;
+		wwan_ptr = netdev_priv(dev);
+		wwan_ptr->ch_id = n;
+		spin_lock_init(&wwan_ptr->lock);
+		init_completion(&wwan_ptr->resource_granted_completion);
+		memset(&ipa_rm_params, 0, sizeof(struct ipa_rm_create_params));
+		ipa_rm_params.name = ipa_rm_resource_by_ch_id[n];
+		ipa_rm_params.reg_params.user_data = dev;
+		ipa_rm_params.reg_params.notify_cb = ipa_rm_notify;
+		ret = ipa_rm_create_resource(&ipa_rm_params);
+		if (ret) {
+			pr_err("%s: unable to create resource %d in IPA RM\n",
+			       __func__, ipa_rm_resource_by_ch_id[n]);
+			goto fail;
+		}
+		ret = ipa_rm_inactivity_timer_init(ipa_rm_resource_by_ch_id[n],
+						   IPA_RM_INACTIVITY_TIMER);
+		if (ret) {
+			pr_err("%s: ipa rm timer init failed %d on ins %d\n",
+			       __func__, ret, n);
+			goto fail;
+		}
+		ret = ipa_rm_add_dependency(ipa_rm_resource_by_ch_id[n],
+					    IPA_RM_RESOURCE_A2_CONS);
+		if (ret) {
+			pr_err("%s: unable to add dependency %d rc=%d\n",
+			       __func__, n, ret);
+			goto fail;
+		}
+		ret = register_netdev(dev);
+		if (ret) {
+			pr_err("%s: unable to register netdev %d rc=%d\n",
+			       __func__, n, ret);
+			goto fail;
+		}
+	}
+	return 0;
+fail:
+	for (n = 0; n < WWAN_DEVICE_COUNT; n++) {
+		if (!netdevs[n])
+			break;
+		unregister_netdev(netdevs[n]);
+		ipa_rm_inactivity_timer_destroy(ipa_rm_resource_by_ch_id[n]);
+		free_netdev(netdevs[n]);
+		netdevs[n] = NULL;
+	}
+	return ret;
+}
+late_initcall(wwan_init);
+
+void wwan_cleanup(void)
+{
+	unsigned n;
+
+	pr_info("%s: WWAN devices[%d]\n", __func__, WWAN_DEVICE_COUNT);
+	for (n = 0; n < WWAN_DEVICE_COUNT; n++) {
+		unregister_netdev(netdevs[n]);
+		ipa_rm_inactivity_timer_destroy(ipa_rm_resource_by_ch_id[n]);
+		free_netdev(netdevs[n]);
+		netdevs[n] = NULL;
+	}
+}
+
+MODULE_DESCRIPTION("WWAN Network Interface");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/net/wireless/wcnss/wcnss_vreg.c b/drivers/net/wireless/wcnss/wcnss_vreg.c
index 025410a..6a315d2 100644
--- a/drivers/net/wireless/wcnss/wcnss_vreg.c
+++ b/drivers/net/wireless/wcnss/wcnss_vreg.c
@@ -156,11 +156,16 @@
 			goto fail;
 		}
 
-		pr_debug("wcnss: Indicate NV bin download\n");
-		spare_reg = msm_wcnss_base + spare_offset;
-		reg = readl_relaxed(spare_reg);
-		reg |= NVBIN_DLND_BIT;
-		writel_relaxed(reg, spare_reg);
+		/* power on through SSR should not set the NV bit;
+		 * during SSR, the NV bin is downloaded by the WLAN driver
+		 */
+		if (!wcnss_cold_boot_done()) {
+			pr_debug("wcnss: Indicate NV bin download\n");
+			spare_reg = msm_wcnss_base + spare_offset;
+			reg = readl_relaxed(spare_reg);
+			reg |= NVBIN_DLND_BIT;
+			writel_relaxed(reg, spare_reg);
+		}
 
 		pmu_conf_reg = msm_wcnss_base + pmu_offset;
 
diff --git a/drivers/net/wireless/wcnss/wcnss_wlan.c b/drivers/net/wireless/wcnss/wcnss_wlan.c
index 439b1f8..ed4e246 100644
--- a/drivers/net/wireless/wcnss/wcnss_wlan.c
+++ b/drivers/net/wireless/wcnss/wcnss_wlan.c
@@ -157,6 +157,7 @@
 	const struct dev_pm_ops *pm_ops;
 	int		triggered;
 	int		smd_channel_ready;
+	int		cold_boot_done;
 	smd_channel_t	*smd_ch;
 	unsigned char	wcnss_version[WCNSS_VERSION_LEN];
 	unsigned int	serial_number;
@@ -727,6 +728,16 @@
 }
 EXPORT_SYMBOL(wcnss_hardware_type);
 
+int wcnss_cold_boot_done(void)
+{
+	if (penv)
+		return penv->cold_boot_done;
+	else
+		return -ENODEV;
+}
+EXPORT_SYMBOL(wcnss_cold_boot_done);
+
 static int wcnss_smd_tx(void *data, int len)
 {
 	int ret = 0;
@@ -965,12 +976,11 @@
 		if (has_pronto_hw) {
 			has_48mhz_xo = of_property_read_bool(pdev->dev.of_node,
 										"qcom,has_48mhz_xo");
-			penv->wcnss_hw_type = WCNSS_PRONTO_HW;
 		} else {
-			penv->wcnss_hw_type = WCNSS_RIVA_HW;
 			has_48mhz_xo = pdata->has_48mhz_xo;
 		}
 	}
+	penv->wcnss_hw_type = (has_pronto_hw) ? WCNSS_PRONTO_HW : WCNSS_RIVA_HW;
 	penv->wlan_config.use_48mhz_xo = has_48mhz_xo;
 
 	penv->thermal_mitigation = 0;
@@ -1062,6 +1072,7 @@
 			goto fail_ioremap;
 		}
 	}
+	penv->cold_boot_done = 1;
 
 	return 0;
 
diff --git a/drivers/platform/msm/Kconfig b/drivers/platform/msm/Kconfig
index f4dff66..76e3175 100644
--- a/drivers/platform/msm/Kconfig
+++ b/drivers/platform/msm/Kconfig
@@ -101,4 +101,14 @@
 		This driver gets the Q6 out of power collapsed state and
 		exposes ioctl control to read avtimer tick.
 
+config SSM
+	tristate "Qualcomm Secure Service Module"
+	depends on QSEECOM
+	depends on MSM_SMD
+	help
+	  Provides an interface for the OEM driver to communicate with
+	  TrustZone and the modem for key exchange and mode change.
+	  This driver uses the Secure Channel Manager interface for
+	  TrustZone communication and talks to the modem over an SMD channel.
+
 endmenu
diff --git a/drivers/platform/msm/Makefile b/drivers/platform/msm/Makefile
index a679fb9..289ece9 100644
--- a/drivers/platform/msm/Makefile
+++ b/drivers/platform/msm/Makefile
@@ -10,3 +10,4 @@
 obj-$(CONFIG_QPNP_VIBRATOR) += qpnp-vibrator.o
 obj-$(CONFIG_QPNP_CLKDIV) += qpnp-clkdiv.o
 obj-$(CONFIG_MSM_AVTIMER) += avtimer.o
+obj-$(CONFIG_SSM) += ssm.o
diff --git a/drivers/platform/msm/ipa/Makefile b/drivers/platform/msm/ipa/Makefile
index c541eb7..b7eca61 100644
--- a/drivers/platform/msm/ipa/Makefile
+++ b/drivers/platform/msm/ipa/Makefile
@@ -1,3 +1,4 @@
 obj-$(CONFIG_IPA) += ipat.o
 ipat-y := ipa.o ipa_debugfs.o ipa_hdr.o ipa_flt.o ipa_rt.o ipa_dp.o ipa_client.o \
-	ipa_utils.o ipa_nat.o rmnet_bridge.o a2_service.o ipa_bridge.o ipa_intf.o
+	ipa_utils.o ipa_nat.o rmnet_bridge.o a2_service.o ipa_bridge.o ipa_intf.o teth_bridge.o \
+	ipa_rm.o ipa_rm_dependency_graph.o ipa_rm_peers_list.o ipa_rm_resource.o ipa_rm_inactivity_timer.o
diff --git a/drivers/platform/msm/ipa/a2_service.c b/drivers/platform/msm/ipa/a2_service.c
index 0ae2552..4b5f0a2 100644
--- a/drivers/platform/msm/ipa/a2_service.c
+++ b/drivers/platform/msm/ipa/a2_service.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -10,267 +10,1568 @@
  * GNU General Public License for more details.
  */
 
-#include <linux/kernel.h>
-#include <linux/types.h>
-#include <mach/bam_dmux.h>
-#include <mach/ipa.h>
+/*
+ *  A2 service component
+ */
+
+#include <net/ip.h>
+#include <linux/delay.h>
+#include <linux/netdevice.h>
+#include <linux/sched.h>
+#include <linux/skbuff.h>
+#include <linux/clk.h>
+#include <linux/wakelock.h>
 #include <mach/sps.h>
+#include <mach/msm_smsm.h>
+#include <mach/socinfo.h>
+#include <mach/ipa.h>
 #include "ipa_i.h"
 
-static struct a2_service_cb_type {
-	void *tx_complete_cb;
-	void *rx_cb;
-	u32 producer_handle;
-	u32 consumer_handle;
-} a2_service_cb;
+#define A2_NUM_PIPES				6
+#define A2_SUMMING_THRESHOLD			4096
+#define BUFFER_SIZE				2048
+#define NUM_BUFFERS				32
+#define BAM_CH_LOCAL_OPEN			0x1
+#define BAM_CH_REMOTE_OPEN			0x2
+#define BAM_CH_IN_RESET				0x4
+#define BAM_MUX_HDR_MAGIC_NO			0x33fc
+#define BAM_MUX_HDR_CMD_DATA			0
+#define BAM_MUX_HDR_CMD_OPEN			1
+#define BAM_MUX_HDR_CMD_CLOSE			2
+#define BAM_MUX_HDR_CMD_STATUS			3
+#define BAM_MUX_HDR_CMD_OPEN_NO_A2_PC		4
+#define LOW_WATERMARK				2
+#define HIGH_WATERMARK				4
+#define A2_MUX_COMPLETION_TIMEOUT		(60*HZ)
+#define ENABLE_DISCONNECT_ACK			0x1
+#define A2_MUX_PADDING_LENGTH(len)		(4 - ((len) & 0x3))
 
-static struct sps_mem_buffer data_mem_buf[2];
-static struct sps_mem_buffer desc_mem_buf[2];
+struct bam_ch_info {
+	u32			status;
+	a2_mux_notify_cb	notify_cb;
+	void			*user_data;
+	spinlock_t		lock;
+	int			num_tx_pkts;
+	int			use_wm;
+	u32			v4_hdr_hdl;
+	u32			v6_hdr_hdl;
+};
+struct tx_pkt_info {
+	struct sk_buff		*skb;
+	char			is_cmd;
+	u32			len;
+	struct list_head	list_node;
+	unsigned		ts_sec;
+	unsigned long		ts_nsec;
+};
+struct bam_mux_hdr {
+	u16			magic_num;
+	u8			reserved;
+	u8			cmd;
+	u8			pad_len;
+	u8			ch_id;
+	u16			pkt_len;
+};
 
-static int connect_pipe_ipa(enum a2_mux_pipe_direction pipe_dir,
-			u8 *usb_pipe_idx,
-			u32 *clnt_hdl,
-			struct sps_pipe *pipe);
+struct a2_mux_context_type {
+	u32 tethered_prod;
+	u32 tethered_cons;
+	u32 embedded_prod;
+	u32 embedded_cons;
+	int a2_mux_apps_pc_enabled;
+	struct work_struct kickoff_ul_wakeup;
+	struct work_struct kickoff_ul_power_down;
+	struct work_struct kickoff_ul_request_resource;
+	struct bam_ch_info bam_ch[A2_MUX_NUM_CHANNELS];
+	struct list_head bam_tx_pool;
+	spinlock_t bam_tx_pool_spinlock;
+	struct workqueue_struct *a2_mux_tx_workqueue;
+	int a2_mux_initialized;
+	bool bam_is_connected;
+	int a2_mux_send_power_vote_on_init_once;
+	int a2_mux_sw_bridge_is_connected;
+	u32 a2_device_handle;
+	struct mutex wakeup_lock;
+	struct completion ul_wakeup_ack_completion;
+	struct completion bam_connection_completion;
+	struct completion request_resource_completion;
+	rwlock_t ul_wakeup_lock;
+	int wait_for_ack;
+	struct wake_lock bam_wakelock;
+	int a2_pc_disabled;
+	spinlock_t wakelock_reference_lock;
+	int wakelock_reference_count;
+	int a2_pc_disabled_wakelock_skipped;
+	int disconnect_ack;
+	struct mutex smsm_cb_lock;
+	int bam_dmux_uplink_vote;
+};
+static struct a2_mux_context_type *a2_mux_ctx;
 
-static int a2_ipa_connect_pipe(struct ipa_connect_params *in_params,
-		struct ipa_sps_params *out_params, u32 *clnt_hdl);
+static void handle_bam_mux_cmd(struct sk_buff *rx_skb);
+
+static bool bam_ch_is_open(int index)
+{
+	return a2_mux_ctx->bam_ch[index].status ==
+		(BAM_CH_LOCAL_OPEN | BAM_CH_REMOTE_OPEN);
+}
+
+static bool bam_ch_is_local_open(int index)
+{
+	return a2_mux_ctx->bam_ch[index].status &
+		BAM_CH_LOCAL_OPEN;
+}
+
+static bool bam_ch_is_remote_open(int index)
+{
+	return a2_mux_ctx->bam_ch[index].status &
+		BAM_CH_REMOTE_OPEN;
+}
+
+static bool bam_ch_is_in_reset(int index)
+{
+	return a2_mux_ctx->bam_ch[index].status &
+		BAM_CH_IN_RESET;
+}
+
+static void set_tx_timestamp(struct tx_pkt_info *pkt)
+{
+	unsigned long long t_now;
+
+	t_now = sched_clock();
+	pkt->ts_nsec = do_div(t_now, 1000000000U);
+	pkt->ts_sec = (unsigned)t_now;
+}
+
+static void verify_tx_queue_is_empty(const char *func)
+{
+	unsigned long flags;
+	struct tx_pkt_info *info;
+	int reported = 0;
+
+	spin_lock_irqsave(&a2_mux_ctx->bam_tx_pool_spinlock, flags);
+	list_for_each_entry(info, &a2_mux_ctx->bam_tx_pool, list_node) {
+		if (!reported) {
+			IPADBG("%s: tx pool not empty\n", func);
+			reported = 1;
+		}
+		IPADBG("%s: node=%p ts=%u.%09lu\n", __func__,
+			&info->list_node, info->ts_sec, info->ts_nsec);
+	}
+	spin_unlock_irqrestore(&a2_mux_ctx->bam_tx_pool_spinlock, flags);
+}
+
+static void grab_wakelock(void)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&a2_mux_ctx->wakelock_reference_lock, flags);
+	IPADBG("%s: ref count = %d\n",
+		__func__,
+		a2_mux_ctx->wakelock_reference_count);
+	if (a2_mux_ctx->wakelock_reference_count == 0)
+		wake_lock(&a2_mux_ctx->bam_wakelock);
+	++a2_mux_ctx->wakelock_reference_count;
+	spin_unlock_irqrestore(&a2_mux_ctx->wakelock_reference_lock, flags);
+}
+
+static void release_wakelock(void)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&a2_mux_ctx->wakelock_reference_lock, flags);
+	if (a2_mux_ctx->wakelock_reference_count == 0) {
+		IPAERR("%s: bam_dmux wakelock not locked\n", __func__);
+		dump_stack();
+		spin_unlock_irqrestore(&a2_mux_ctx->wakelock_reference_lock,
+				       flags);
+		return;
+	}
+	IPADBG("%s: ref count = %d\n",
+		__func__,
+		a2_mux_ctx->wakelock_reference_count);
+	--a2_mux_ctx->wakelock_reference_count;
+	if (a2_mux_ctx->wakelock_reference_count == 0)
+		wake_unlock(&a2_mux_ctx->bam_wakelock);
+	spin_unlock_irqrestore(&a2_mux_ctx->wakelock_reference_lock, flags);
+}
+
+static void toggle_apps_ack(void)
+{
+	static unsigned int clear_bit; /* 0 = set the bit, else clear bit */
+
+	IPADBG("%s: apps ack %d->%d\n", __func__,
+			clear_bit & 0x1, ~clear_bit & 0x1);
+	smsm_change_state(SMSM_APPS_STATE,
+				clear_bit & SMSM_A2_POWER_CONTROL_ACK,
+				~clear_bit & SMSM_A2_POWER_CONTROL_ACK);
+	clear_bit = ~clear_bit;
+}
+
+static void power_vote(int vote)
+{
+	IPADBG("%s: curr=%d, vote=%d\n",
+		__func__,
+		a2_mux_ctx->bam_dmux_uplink_vote, vote);
+	if (a2_mux_ctx->bam_dmux_uplink_vote == vote)
+		IPADBG("%s: warning - duplicate power vote\n", __func__);
+	a2_mux_ctx->bam_dmux_uplink_vote = vote;
+	if (vote)
+		smsm_change_state(SMSM_APPS_STATE, 0, SMSM_A2_POWER_CONTROL);
+	else
+		smsm_change_state(SMSM_APPS_STATE, SMSM_A2_POWER_CONTROL, 0);
+}
+
+static inline void ul_powerdown(void)
+{
+	IPADBG("%s: powerdown\n", __func__);
+	verify_tx_queue_is_empty(__func__);
+	if (a2_mux_ctx->a2_pc_disabled)
+		release_wakelock();
+	else {
+		a2_mux_ctx->wait_for_ack = 1;
+		INIT_COMPLETION(a2_mux_ctx->ul_wakeup_ack_completion);
+		power_vote(0);
+	}
+	a2_mux_ctx->bam_is_connected = false;
+}
+
+static void ul_wakeup(void)
+{
+	int ret;
+
+	mutex_lock(&a2_mux_ctx->wakeup_lock);
+	if (a2_mux_ctx->bam_is_connected) {
+		IPADBG("%s Already awake\n", __func__);
+		mutex_unlock(&a2_mux_ctx->wakeup_lock);
+		return;
+	}
+	if (a2_mux_ctx->a2_pc_disabled) {
+		/*
+		 * don't grab the wakelock the first time because it is
+		 * already grabbed when a2 powers on
+		 */
+		if (likely(a2_mux_ctx->a2_pc_disabled_wakelock_skipped))
+			grab_wakelock();
+		else
+			a2_mux_ctx->a2_pc_disabled_wakelock_skipped = 1;
+		a2_mux_ctx->bam_is_connected = true;
+		mutex_unlock(&a2_mux_ctx->wakeup_lock);
+		return;
+	}
+	/*
+	 * must wait for the previous power down request to have been acked;
+	 * chances are it already came in, and this will just fall through
+	 * instead of waiting
+	 */
+	if (a2_mux_ctx->wait_for_ack) {
+		IPADBG("%s waiting for previous ack\n", __func__);
+		ret = wait_for_completion_timeout(
+					&a2_mux_ctx->ul_wakeup_ack_completion,
+					A2_MUX_COMPLETION_TIMEOUT);
+		a2_mux_ctx->wait_for_ack = 0;
+		if (unlikely(ret == 0)) {
+			IPADBG("%s timeout previous ack\n", __func__);
+			goto bail;
+		}
+	}
+	INIT_COMPLETION(a2_mux_ctx->ul_wakeup_ack_completion);
+	power_vote(1);
+	IPADBG("%s waiting for wakeup ack\n", __func__);
+	ret = wait_for_completion_timeout(&a2_mux_ctx->ul_wakeup_ack_completion,
+					A2_MUX_COMPLETION_TIMEOUT);
+	if (unlikely(ret == 0)) {
+		IPADBG("%s timeout wakeup ack\n", __func__);
+		goto bail;
+	}
+	INIT_COMPLETION(a2_mux_ctx->bam_connection_completion);
+	if (!a2_mux_ctx->a2_mux_sw_bridge_is_connected) {
+		ret = wait_for_completion_timeout(
+			&a2_mux_ctx->bam_connection_completion,
+			A2_MUX_COMPLETION_TIMEOUT);
+		if (unlikely(ret == 0)) {
+			IPADBG("%s timeout power on\n", __func__);
+			goto bail;
+		}
+	}
+	a2_mux_ctx->bam_is_connected = true;
+	IPADBG("%s complete\n", __func__);
+	mutex_unlock(&a2_mux_ctx->wakeup_lock);
+	return;
+bail:
+	mutex_unlock(&a2_mux_ctx->wakeup_lock);
+	BUG();
+	return;
+}
+
+static void bam_mux_write_done(bool is_tethered, struct sk_buff *skb)
+{
+	struct tx_pkt_info *info;
+	enum a2_mux_logical_channel_id lcid;
+	unsigned long event_data;
+	unsigned long flags;
+
+	spin_lock_irqsave(&a2_mux_ctx->bam_tx_pool_spinlock, flags);
+	info = list_first_entry(&a2_mux_ctx->bam_tx_pool,
+			struct tx_pkt_info, list_node);
+	if (unlikely(info->skb != skb)) {
+		struct tx_pkt_info *errant_pkt;
+
+		IPAERR("tx_pool mismatch next=%p list_node=%p, ts=%u.%09lu\n",
+				a2_mux_ctx->bam_tx_pool.next,
+				&info->list_node,
+				info->ts_sec, info->ts_nsec
+				);
+
+		list_for_each_entry(errant_pkt,
+				    &a2_mux_ctx->bam_tx_pool, list_node) {
+			IPAERR("%s: node=%p ts=%u.%09lu\n", __func__,
+			&errant_pkt->list_node, errant_pkt->ts_sec,
+			errant_pkt->ts_nsec);
+			if (errant_pkt->skb == skb)
+				info = errant_pkt;
+
+		}
+		spin_unlock_irqrestore(&a2_mux_ctx->bam_tx_pool_spinlock,
+				       flags);
+		BUG();
+	}
+	list_del(&info->list_node);
+	spin_unlock_irqrestore(&a2_mux_ctx->bam_tx_pool_spinlock, flags);
+	if (info->is_cmd) {
+		dev_kfree_skb_any(info->skb);
+		kfree(info);
+		return;
+	}
+	skb = info->skb;
+	kfree(info);
+	event_data = (unsigned long)(skb);
+	if (is_tethered) {
+		lcid = A2_MUX_TETHERED_0;
+	} else {
+		struct bam_mux_hdr *hdr = (struct bam_mux_hdr *)skb->data;
+
+		lcid = (enum a2_mux_logical_channel_id)hdr->ch_id;
+	}
+	spin_lock_irqsave(&a2_mux_ctx->bam_ch[lcid].lock, flags);
+	a2_mux_ctx->bam_ch[lcid].num_tx_pkts--;
+	spin_unlock_irqrestore(&a2_mux_ctx->bam_ch[lcid].lock, flags);
+	if (a2_mux_ctx->bam_ch[lcid].notify_cb)
+		a2_mux_ctx->bam_ch[lcid].notify_cb(
+			a2_mux_ctx->bam_ch[lcid].user_data, A2_MUX_WRITE_DONE,
+							event_data);
+	else
+		dev_kfree_skb_any(skb);
+}
+
+static void kickoff_ul_power_down_func(struct work_struct *work)
+{
+	unsigned long flags;
+
+	write_lock_irqsave(&a2_mux_ctx->ul_wakeup_lock, flags);
+	if (a2_mux_ctx->bam_is_connected) {
+		IPADBG("%s: UL active - forcing powerdown\n", __func__);
+		ul_powerdown();
+	}
+	write_unlock_irqrestore(&a2_mux_ctx->ul_wakeup_lock, flags);
+	ipa_rm_notify_completion(IPA_RM_RESOURCE_RELEASED,
+			IPA_RM_RESOURCE_A2_CONS);
+}
+
+static void kickoff_ul_wakeup_func(struct work_struct *work)
+{
+	if (!a2_mux_ctx->bam_is_connected)
+		ul_wakeup();
+	ipa_rm_notify_completion(IPA_RM_RESOURCE_GRANTED,
+			IPA_RM_RESOURCE_A2_CONS);
+}
+
+static void kickoff_ul_request_resource_func(struct work_struct *work)
+{
+	int ret;
+
+	INIT_COMPLETION(a2_mux_ctx->request_resource_completion);
+	ret = ipa_rm_request_resource(IPA_RM_RESOURCE_A2_PROD);
+	if (ret < 0 && ret != -EINPROGRESS) {
+		IPAERR("%s: ipa_rm_request_resource failed %d\n", __func__,
+		       ret);
+		return;
+	}
+	if (ret == -EINPROGRESS) {
+		ret = wait_for_completion_timeout(
+			&a2_mux_ctx->request_resource_completion,
+			A2_MUX_COMPLETION_TIMEOUT);
+		if (unlikely(ret == 0)) {
+			IPADBG("%s timeout request A2 PROD resource\n",
+				     __func__);
+			BUG();
+			return;
+		}
+	}
+	toggle_apps_ack();
+}
+
+static bool msm_bam_dmux_kickoff_ul_wakeup(void)
+{
+	bool is_connected;
+
+	read_lock(&a2_mux_ctx->ul_wakeup_lock);
+	is_connected = a2_mux_ctx->bam_is_connected;
+	read_unlock(&a2_mux_ctx->ul_wakeup_lock);
+	if (!is_connected)
+		queue_work(a2_mux_ctx->a2_mux_tx_workqueue,
+			   &a2_mux_ctx->kickoff_ul_wakeup);
+	return is_connected;
+}
+
+static bool msm_bam_dmux_kickoff_ul_power_down(void)
+{
+	bool is_connected;
+
+	read_lock(&a2_mux_ctx->ul_wakeup_lock);
+	is_connected = a2_mux_ctx->bam_is_connected;
+	read_unlock(&a2_mux_ctx->ul_wakeup_lock);
+	if (is_connected)
+		queue_work(a2_mux_ctx->a2_mux_tx_workqueue,
+			   &a2_mux_ctx->kickoff_ul_power_down);
+	return is_connected;
+}
+
+static void ipa_embedded_notify(void *priv,
+				enum ipa_dp_evt_type evt,
+				unsigned long data)
+{
+	switch (evt) {
+	case IPA_RECEIVE:
+		handle_bam_mux_cmd((struct sk_buff *)data);
+		break;
+	case IPA_WRITE_DONE:
+		bam_mux_write_done(false, (struct sk_buff *)data);
+		break;
+	default:
+		IPAERR("%s: Unknown event %d\n", __func__, evt);
+		break;
+	}
+}
+
+static void ipa_tethered_notify(void *priv,
+				enum ipa_dp_evt_type evt,
+				unsigned long data)
+{
+	IPADBG("%s: event = %d\n", __func__, evt);
+	switch (evt) {
+	case IPA_RECEIVE:
+		if (a2_mux_ctx->bam_ch[A2_MUX_TETHERED_0].notify_cb)
+			a2_mux_ctx->bam_ch[A2_MUX_TETHERED_0].notify_cb(
+				a2_mux_ctx->bam_ch[A2_MUX_TETHERED_0].user_data,
+				A2_MUX_RECEIVE,
+				data);
+		break;
+	case IPA_WRITE_DONE:
+		bam_mux_write_done(true, (struct sk_buff *)data);
+		break;
+	default:
+		IPAERR("%s: Unknown event %d\n", __func__, evt);
+		break;
+	}
+}
+
+static int connect_to_bam(void)
+{
+	int ret;
+	struct ipa_sys_connect_params connect_params;
+
+	IPAERR("%s:\n", __func__);
+	if (a2_mux_ctx->a2_mux_sw_bridge_is_connected) {
+		IPAERR("%s: SW bridge is already UP\n",
+				__func__);
+		return -EFAULT;
+	}
+	ret = sps_device_reset(a2_mux_ctx->a2_device_handle);
+	if (ret)
+		IPAERR("%s: device reset failed ret = %d\n",
+		       __func__, ret);
+	memset(&connect_params, 0, sizeof(struct ipa_sys_connect_params));
+	connect_params.client = IPA_CLIENT_A2_TETHERED_CONS;
+	connect_params.notify = ipa_tethered_notify;
+	connect_params.desc_fifo_sz = 0x800;
+	ret = ipa_bridge_setup(IPA_BRIDGE_DIR_UL, IPA_BRIDGE_TYPE_TETHERED,
+			&connect_params,
+			&a2_mux_ctx->tethered_prod);
+	if (ret) {
+		IPAERR("%s: IPA bridge tethered UL failed to connect: %d\n",
+				__func__, ret);
+		goto bridge_tethered_ul_failed;
+	}
+	memset(&connect_params, 0, sizeof(struct ipa_sys_connect_params));
+	connect_params.ipa_ep_cfg.mode.mode = IPA_DMA;
+	connect_params.ipa_ep_cfg.mode.dst = IPA_CLIENT_USB_CONS;
+	connect_params.client = IPA_CLIENT_A2_TETHERED_PROD;
+	connect_params.notify = ipa_tethered_notify;
+	connect_params.desc_fifo_sz = 0x800;
+	ret = ipa_bridge_setup(IPA_BRIDGE_DIR_DL, IPA_BRIDGE_TYPE_TETHERED,
+			&connect_params,
+			&a2_mux_ctx->tethered_cons);
+	if (ret) {
+		IPAERR("%s: IPA bridge tethered DL failed to connect: %d\n",
+				__func__, ret);
+		goto bridge_tethered_dl_failed;
+	}
+	memset(&connect_params, 0, sizeof(struct ipa_sys_connect_params));
+	connect_params.ipa_ep_cfg.hdr.hdr_len = sizeof(struct bam_mux_hdr);
+	connect_params.ipa_ep_cfg.hdr.hdr_ofst_pkt_size_valid = 1;
+	connect_params.ipa_ep_cfg.hdr.hdr_ofst_pkt_size = 6;
+	connect_params.client = IPA_CLIENT_A2_EMBEDDED_CONS;
+	connect_params.notify = ipa_embedded_notify;
+	connect_params.desc_fifo_sz = 0x800;
+	ret = ipa_bridge_setup(IPA_BRIDGE_DIR_UL, IPA_BRIDGE_TYPE_EMBEDDED,
+			&connect_params,
+			&a2_mux_ctx->embedded_prod);
+	if (ret) {
+		IPAERR("%s: IPA bridge embedded UL failed to connect: %d\n",
+				__func__, ret);
+		goto bridge_embedded_ul_failed;
+	}
+	memset(&connect_params, 0, sizeof(struct ipa_sys_connect_params));
+	connect_params.ipa_ep_cfg.hdr.hdr_len = sizeof(struct bam_mux_hdr);
+	connect_params.ipa_ep_cfg.hdr.hdr_ofst_metadata_valid = 1;
+	connect_params.ipa_ep_cfg.hdr.hdr_ofst_metadata = 4;
+	connect_params.client = IPA_CLIENT_A2_EMBEDDED_PROD;
+	connect_params.notify = ipa_embedded_notify;
+	connect_params.desc_fifo_sz = 0x800;
+	ret = ipa_bridge_setup(IPA_BRIDGE_DIR_DL, IPA_BRIDGE_TYPE_EMBEDDED,
+			&connect_params,
+			&a2_mux_ctx->embedded_cons);
+	if (ret) {
+		IPAERR("%s: IPA bridge embedded DL failed to connect: %d\n",
+		       __func__, ret);
+		goto bridge_embedded_dl_failed;
+	}
+	a2_mux_ctx->a2_mux_sw_bridge_is_connected = 1;
+	complete_all(&a2_mux_ctx->bam_connection_completion);
+	return 0;
+
+bridge_embedded_dl_failed:
+	ipa_bridge_teardown(IPA_BRIDGE_DIR_UL, IPA_BRIDGE_TYPE_EMBEDDED,
+			a2_mux_ctx->embedded_prod);
+bridge_embedded_ul_failed:
+	ipa_bridge_teardown(IPA_BRIDGE_DIR_DL, IPA_BRIDGE_TYPE_TETHERED,
+			a2_mux_ctx->tethered_cons);
+bridge_tethered_dl_failed:
+	ipa_bridge_teardown(IPA_BRIDGE_DIR_UL, IPA_BRIDGE_TYPE_TETHERED,
+			a2_mux_ctx->tethered_prod);
+bridge_tethered_ul_failed:
+	return ret;
+}
+
+static int disconnect_to_bam(void)
+{
+	int ret;
+
+	IPAERR("%s\n", __func__);
+	if (!a2_mux_ctx->a2_mux_sw_bridge_is_connected) {
+		IPAERR("%s: SW bridge is already DOWN\n",
+				__func__);
+		return -EFAULT;
+	}
+	ret = ipa_bridge_teardown(IPA_BRIDGE_DIR_UL, IPA_BRIDGE_TYPE_TETHERED,
+			a2_mux_ctx->tethered_prod);
+	if (ret) {
+		IPAERR("%s: IPA bridge tethered UL failed to disconnect: %d\n",
+				__func__, ret);
+		return ret;
+	}
+	ret = ipa_bridge_teardown(IPA_BRIDGE_DIR_DL, IPA_BRIDGE_TYPE_TETHERED,
+			a2_mux_ctx->tethered_cons);
+	if (ret) {
+		IPAERR("%s: IPA bridge tethered DL failed to disconnect: %d\n",
+				__func__, ret);
+		return ret;
+	}
+	ret = ipa_bridge_teardown(IPA_BRIDGE_DIR_UL, IPA_BRIDGE_TYPE_EMBEDDED,
+			a2_mux_ctx->embedded_prod);
+	if (ret) {
+		IPAERR("%s: IPA bridge embedded UL failed to disconnect: %d\n",
+				__func__, ret);
+		return ret;
+	}
+	ret = ipa_bridge_teardown(IPA_BRIDGE_DIR_DL, IPA_BRIDGE_TYPE_EMBEDDED,
+			a2_mux_ctx->embedded_cons);
+	if (ret) {
+		IPAERR("%s: IPA bridge embedded DL failed to disconnect: %d\n",
+				__func__, ret);
+		return ret;
+	}
+	ret = sps_device_reset(a2_mux_ctx->a2_device_handle);
+	if (ret) {
+		IPAERR("%s: device reset failed ret = %d\n",
+			__func__, ret);
+		return ret;
+	}
+	verify_tx_queue_is_empty(__func__);
+	(void) ipa_rm_release_resource(IPA_RM_RESOURCE_A2_PROD);
+	if (a2_mux_ctx->disconnect_ack)
+		toggle_apps_ack();
+	a2_mux_ctx->a2_mux_sw_bridge_is_connected = 0;
+	complete_all(&a2_mux_ctx->bam_connection_completion);
+	return 0;
+}
+
+static void bam_dmux_smsm_cb(void *priv,
+		u32 old_state,
+		u32 new_state)
+{
+	static int last_processed_state;
+
+	mutex_lock(&a2_mux_ctx->smsm_cb_lock);
+	IPADBG("%s: 0x%08x -> 0x%08x\n", __func__, old_state,
+			new_state);
+	if (last_processed_state == (new_state & SMSM_A2_POWER_CONTROL)) {
+		IPADBG("%s: already processed this state\n", __func__);
+		mutex_unlock(&a2_mux_ctx->smsm_cb_lock);
+		return;
+	}
+	last_processed_state = new_state & SMSM_A2_POWER_CONTROL;
+	if (new_state & SMSM_A2_POWER_CONTROL) {
+		IPADBG("%s: MODEM PWR CTRL 1\n", __func__);
+		grab_wakelock();
+		(void) connect_to_bam();
+		queue_work(a2_mux_ctx->a2_mux_tx_workqueue,
+			   &a2_mux_ctx->kickoff_ul_request_resource);
+	} else if (!(new_state & SMSM_A2_POWER_CONTROL)) {
+		IPADBG("%s: MODEM PWR CTRL 0\n", __func__);
+		(void) disconnect_to_bam();
+		release_wakelock();
+	} else {
+		IPAERR("%s: unsupported state change\n", __func__);
+	}
+	mutex_unlock(&a2_mux_ctx->smsm_cb_lock);
+}
+
+static void bam_dmux_smsm_ack_cb(void *priv, u32 old_state,
+						u32 new_state)
+{
+	IPADBG("%s: 0x%08x -> 0x%08x\n", __func__, old_state,
+			new_state);
+	complete_all(&a2_mux_ctx->ul_wakeup_ack_completion);
+}
+
+static int a2_mux_pm_rm_request_resource(void)
+{
+	int result = 0;
+	bool is_connected;
+
+	is_connected = msm_bam_dmux_kickoff_ul_wakeup();
+	if (!is_connected)
+		result = -EINPROGRESS;
+	return result;
+}
+
+static int a2_mux_pm_rm_release_resource(void)
+{
+	int result = 0;
+	bool is_connected;
+
+	is_connected = msm_bam_dmux_kickoff_ul_power_down();
+	if (is_connected)
+		result = -EINPROGRESS;
+	return result;
+}
+
+static void a2_mux_pm_rm_notify_cb(void *user_data,
+		enum ipa_rm_event event,
+		unsigned long data)
+{
+	switch (event) {
+	case IPA_RM_RESOURCE_GRANTED:
+		IPADBG("%s: PROD GRANTED CB\n", __func__);
+		complete_all(&a2_mux_ctx->request_resource_completion);
+		break;
+	case IPA_RM_RESOURCE_RELEASED:
+		IPADBG("%s: PROD RELEASED CB\n", __func__);
+		break;
+	default:
+		return;
+	}
+}
+
+static int a2_mux_pm_initialize_rm(void)
+{
+	struct ipa_rm_create_params create_params;
+	int result;
+
+	memset(&create_params, 0, sizeof(create_params));
+	create_params.name = IPA_RM_RESOURCE_A2_PROD;
+	create_params.reg_params.notify_cb = &a2_mux_pm_rm_notify_cb;
+	result = ipa_rm_create_resource(&create_params);
+	if (result)
+		goto bail;
+	memset(&create_params, 0, sizeof(create_params));
+	create_params.name = IPA_RM_RESOURCE_A2_CONS;
+	create_params.release_resource = &a2_mux_pm_rm_release_resource;
+	create_params.request_resource = &a2_mux_pm_rm_request_resource;
+	result = ipa_rm_create_resource(&create_params);
+bail:
+	return result;
+}
+
+static void bam_mux_process_data(struct sk_buff *rx_skb)
+{
+	unsigned long flags;
+	struct bam_mux_hdr *rx_hdr;
+	unsigned long event_data;
+
+	rx_hdr = (struct bam_mux_hdr *)rx_skb->data;
+	rx_skb->data = (unsigned char *)(rx_hdr + 1);
+	rx_skb->tail = rx_skb->data + rx_hdr->pkt_len;
+	rx_skb->len = rx_hdr->pkt_len;
+	rx_skb->truesize = rx_hdr->pkt_len + sizeof(struct sk_buff);
+	event_data = (unsigned long)(rx_skb);
+	spin_lock_irqsave(&a2_mux_ctx->bam_ch[rx_hdr->ch_id].lock, flags);
+	if (a2_mux_ctx->bam_ch[rx_hdr->ch_id].notify_cb)
+		a2_mux_ctx->bam_ch[rx_hdr->ch_id].notify_cb(
+			a2_mux_ctx->bam_ch[rx_hdr->ch_id].user_data,
+			A2_MUX_RECEIVE,
+			event_data);
+	else
+		dev_kfree_skb_any(rx_skb);
+	spin_unlock_irqrestore(&a2_mux_ctx->bam_ch[rx_hdr->ch_id].lock,
+			       flags);
+}
+
+static void handle_bam_mux_cmd_open(struct bam_mux_hdr *rx_hdr)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&a2_mux_ctx->bam_ch[rx_hdr->ch_id].lock, flags);
+	a2_mux_ctx->bam_ch[rx_hdr->ch_id].status |= BAM_CH_REMOTE_OPEN;
+	a2_mux_ctx->bam_ch[rx_hdr->ch_id].num_tx_pkts = 0;
+	spin_unlock_irqrestore(&a2_mux_ctx->bam_ch[rx_hdr->ch_id].lock,
+			       flags);
+}
+
+static void handle_bam_mux_cmd(struct sk_buff *rx_skb)
+{
+	unsigned long flags;
+	struct bam_mux_hdr *rx_hdr;
+
+	rx_hdr = (struct bam_mux_hdr *)rx_skb->data;
+	IPADBG("%s: magic %x reserved %d cmd %d pad %d ch %d len %d\n",
+			__func__,
+			rx_hdr->magic_num, rx_hdr->reserved, rx_hdr->cmd,
+			rx_hdr->pad_len, rx_hdr->ch_id, rx_hdr->pkt_len);
+	rx_hdr->magic_num = ntohs(rx_hdr->magic_num);
+	rx_hdr->pkt_len = ntohs(rx_hdr->pkt_len);
+	IPADBG("%s: converted to host order magic_num=%d, pkt_len=%d\n",
+	    __func__, rx_hdr->magic_num, rx_hdr->pkt_len);
+	if (rx_hdr->magic_num != BAM_MUX_HDR_MAGIC_NO) {
+		IPAERR("bad hdr magic %x rvd %d cmd %d pad %d ch %d len %d\n",
+		       rx_hdr->magic_num, rx_hdr->reserved, rx_hdr->cmd,
+			rx_hdr->pad_len, rx_hdr->ch_id, rx_hdr->pkt_len);
+		dev_kfree_skb_any(rx_skb);
+		return;
+	}
+	if (rx_hdr->ch_id >= A2_MUX_NUM_CHANNELS) {
+		IPAERR("bad LCID %d rsvd %d cmd %d pad %d ch %d len %d\n",
+			rx_hdr->ch_id, rx_hdr->reserved, rx_hdr->cmd,
+			rx_hdr->pad_len, rx_hdr->ch_id, rx_hdr->pkt_len);
+		dev_kfree_skb_any(rx_skb);
+		return;
+	}
+	switch (rx_hdr->cmd) {
+	case BAM_MUX_HDR_CMD_DATA:
+		bam_mux_process_data(rx_skb);
+		break;
+	case BAM_MUX_HDR_CMD_OPEN:
+		IPADBG("%s: opening cid %d PC enabled\n", __func__,
+				rx_hdr->ch_id);
+		handle_bam_mux_cmd_open(rx_hdr);
+		if (!(rx_hdr->reserved & ENABLE_DISCONNECT_ACK)) {
+			IPADBG("%s: deactivating disconnect ack\n",
+								__func__);
+			a2_mux_ctx->disconnect_ack = 0;
+		}
+		dev_kfree_skb_any(rx_skb);
+		if (a2_mux_ctx->a2_mux_send_power_vote_on_init_once) {
+			kickoff_ul_wakeup_func(NULL);
+			a2_mux_ctx->a2_mux_send_power_vote_on_init_once = 0;
+		}
+		break;
+	case BAM_MUX_HDR_CMD_OPEN_NO_A2_PC:
+		IPADBG("%s: opening cid %d PC disabled\n", __func__,
+				rx_hdr->ch_id);
+		if (!a2_mux_ctx->a2_pc_disabled) {
+			a2_mux_ctx->a2_pc_disabled = 1;
+			ul_wakeup();
+		}
+		handle_bam_mux_cmd_open(rx_hdr);
+		dev_kfree_skb_any(rx_skb);
+		break;
+	case BAM_MUX_HDR_CMD_CLOSE:
+		/* probably should drop pending write */
+		IPADBG("%s: closing cid %d\n", __func__,
+				rx_hdr->ch_id);
+		spin_lock_irqsave(&a2_mux_ctx->bam_ch[rx_hdr->ch_id].lock,
+				  flags);
+		a2_mux_ctx->bam_ch[rx_hdr->ch_id].status &=
+			~BAM_CH_REMOTE_OPEN;
+		spin_unlock_irqrestore(
+			&a2_mux_ctx->bam_ch[rx_hdr->ch_id].lock, flags);
+		dev_kfree_skb_any(rx_skb);
+		break;
+	default:
+		IPAERR("bad hdr.magic %x rvd %d cmd %d pad %d ch %d len %d\n",
+			rx_hdr->magic_num, rx_hdr->reserved,
+			rx_hdr->cmd, rx_hdr->pad_len, rx_hdr->ch_id,
+			rx_hdr->pkt_len);
+		dev_kfree_skb_any(rx_skb);
+		return;
+	}
+}
+
+static int bam_mux_write_cmd(void *data, u32 len)
+{
+	int rc;
+	struct tx_pkt_info *pkt;
+	unsigned long flags;
+
+	pkt = kmalloc(sizeof(struct tx_pkt_info), GFP_ATOMIC);
+	if (pkt == NULL) {
+		IPAERR("%s: mem alloc for tx_pkt_info failed\n", __func__);
+		return -ENOMEM;
+	}
+	pkt->skb = __dev_alloc_skb(len, GFP_NOWAIT | __GFP_NOWARN);
+	if (pkt->skb == NULL) {
+		IPAERR("%s: unable to alloc skb\n", __func__);
+		kfree(pkt);
+		return -ENOMEM;
+	}
+	memcpy(skb_put(pkt->skb, len), data, len);
+	kfree(data);
+	pkt->len = len;
+	pkt->is_cmd = 1;
+	set_tx_timestamp(pkt);
+	spin_lock_irqsave(&a2_mux_ctx->bam_tx_pool_spinlock, flags);
+	list_add_tail(&pkt->list_node, &a2_mux_ctx->bam_tx_pool);
+	rc = ipa_tx_dp(IPA_CLIENT_A2_EMBEDDED_CONS, pkt->skb, NULL);
+	if (rc) {
+		IPAERR("%s ipa_tx_dp failed rc=%d\n",
+			__func__, rc);
+		list_del(&pkt->list_node);
+		spin_unlock_irqrestore(&a2_mux_ctx->bam_tx_pool_spinlock,
+				       flags);
+		dev_kfree_skb_any(pkt->skb);
+		kfree(pkt);
+	} else {
+		spin_unlock_irqrestore(&a2_mux_ctx->bam_tx_pool_spinlock,
+				       flags);
+	}
+	return rc;
+}
 
 /**
- * a2_mux_initialize() - initialize A2 MUX module
+ * a2_mux_get_tethered_client_handles() - provide the tethered
+ *		pipe handles for post-setup configuration
+ * @lcid: logical channel ID
+ * @clnt_cons_handle: [out] consumer pipe handle
+ * @clnt_prod_handle: [out] producer pipe handle
  *
- * Return codes:
- * 0: success
+ * Returns: 0 on success, negative on failure
  */
-int a2_mux_initialize(void)
+int a2_mux_get_tethered_client_handles(enum a2_mux_logical_channel_id lcid,
+		unsigned int *clnt_cons_handle,
+		unsigned int *clnt_prod_handle)
 {
-	(void) msm_bam_dmux_ul_power_vote();
-
+	if (!a2_mux_ctx->a2_mux_initialized || lcid != A2_MUX_TETHERED_0)
+		return -ENODEV;
+	if (!clnt_cons_handle || !clnt_prod_handle)
+		return -EINVAL;
+	*clnt_prod_handle = a2_mux_ctx->tethered_prod;
+	*clnt_cons_handle = a2_mux_ctx->tethered_cons;
 	return 0;
 }
 
 /**
- * a2_mux_close() - close A2 MUX module
+ * a2_mux_write() - send the packet to A2,
+ *		add MUX header according to the lcid provided
+ * @id: logical channel ID
+ * @skb: SKB to write
  *
- * Return codes:
- * 0: success
- * -EINVAL: invalid parameters
+ * Returns: 0 on success, negative on failure
  */
-int a2_mux_close(void)
+int a2_mux_write(enum a2_mux_logical_channel_id id, struct sk_buff *skb)
 {
-	int ret = 0;
+	int rc = 0;
+	struct bam_mux_hdr *hdr;
+	unsigned long flags;
+	struct sk_buff *new_skb = NULL;
+	struct tx_pkt_info *pkt;
+	bool is_connected;
 
-	(void) msm_bam_dmux_ul_power_unvote();
+	if (id >= A2_MUX_NUM_CHANNELS)
+		return -EINVAL;
+	if (!skb)
+		return -EINVAL;
+	if (!a2_mux_ctx->a2_mux_initialized)
+		return -ENODEV;
+	spin_lock_irqsave(&a2_mux_ctx->bam_ch[id].lock, flags);
+	if (!bam_ch_is_open(id)) {
+		spin_unlock_irqrestore(&a2_mux_ctx->bam_ch[id].lock, flags);
+		IPAERR("%s: port not open: %d\n",
+		       __func__,
+		       a2_mux_ctx->bam_ch[id].status);
+		return -ENODEV;
+	}
+	if (a2_mux_ctx->bam_ch[id].use_wm &&
+	    (a2_mux_ctx->bam_ch[id].num_tx_pkts >= HIGH_WATERMARK)) {
+		spin_unlock_irqrestore(&a2_mux_ctx->bam_ch[id].lock, flags);
+		IPAERR("%s: watermark exceeded: %d\n", __func__, id);
+		return -EAGAIN;
+	}
+	spin_unlock_irqrestore(&a2_mux_ctx->bam_ch[id].lock, flags);
+	read_lock(&a2_mux_ctx->ul_wakeup_lock);
+	is_connected = a2_mux_ctx->bam_is_connected;
+	read_unlock(&a2_mux_ctx->ul_wakeup_lock);
+	if (!is_connected)
+		return -ENODEV;
+	if (id != A2_MUX_TETHERED_0) {
+		/*
+		 * if the skb does not have enough tailroom for padding,
+		 * copy it into a new, expanded skb
+		 */
+		if ((skb->len & 0x3) &&
+		    (skb_tailroom(skb) < A2_MUX_PADDING_LENGTH(skb->len))) {
+			new_skb = skb_copy_expand(skb, skb_headroom(skb),
+					A2_MUX_PADDING_LENGTH(skb->len),
+					GFP_ATOMIC);
+			if (new_skb == NULL) {
+				IPAERR("%s: cannot allocate skb\n", __func__);
+				rc = -ENOMEM;
+				goto write_fail;
+			}
+			dev_kfree_skb_any(skb);
+			skb = new_skb;
+		}
+		hdr = (struct bam_mux_hdr *)skb_push(
+					skb, sizeof(struct bam_mux_hdr));
+		/*
+		 * the caller should allocate room for the hdr and padding;
+		 * the hdr is fine, the padding is tricky
+		 */
+		hdr->magic_num = BAM_MUX_HDR_MAGIC_NO;
+		hdr->cmd = BAM_MUX_HDR_CMD_DATA;
+		hdr->reserved = 0;
+		hdr->ch_id = id;
+		hdr->pkt_len = skb->len - sizeof(struct bam_mux_hdr);
+		if (skb->len & 0x3)
+			skb_put(skb, A2_MUX_PADDING_LENGTH(skb->len));
+		hdr->pad_len = skb->len - (sizeof(struct bam_mux_hdr) +
+					   hdr->pkt_len);
+		IPADBG("data %p, tail %p skb len %d pkt len %d pad len %d\n",
+		    skb->data, skb->tail, skb->len,
+		    hdr->pkt_len, hdr->pad_len);
+		hdr->magic_num = htons(hdr->magic_num);
+		hdr->pkt_len = htons(hdr->pkt_len);
+		IPADBG("convert to network order magic_num=%d, pkt_len=%d\n",
+		    hdr->magic_num, hdr->pkt_len);
+	}
+	pkt = kmalloc(sizeof(struct tx_pkt_info), GFP_ATOMIC);
+	if (pkt == NULL) {
+		IPAERR("%s: mem alloc for tx_pkt_info failed\n", __func__);
+		rc = -ENOMEM;
+		goto write_fail2;
+	}
+	pkt->skb = skb;
+	pkt->is_cmd = 0;
+	set_tx_timestamp(pkt);
+	spin_lock_irqsave(&a2_mux_ctx->bam_tx_pool_spinlock, flags);
+	list_add_tail(&pkt->list_node, &a2_mux_ctx->bam_tx_pool);
+	if (id == A2_MUX_TETHERED_0)
+		rc = ipa_tx_dp(IPA_CLIENT_A2_TETHERED_CONS, skb, NULL);
+	else
+		rc = ipa_tx_dp(IPA_CLIENT_A2_EMBEDDED_CONS, skb, NULL);
+	if (rc) {
+		IPAERR("%s ipa_tx_dp failed rc=%d\n",
+			__func__, rc);
+		list_del(&pkt->list_node);
+		spin_unlock_irqrestore(&a2_mux_ctx->bam_tx_pool_spinlock,
+				       flags);
+		goto write_fail3;
+	} else {
+		spin_unlock_irqrestore(&a2_mux_ctx->bam_tx_pool_spinlock,
+				       flags);
+		spin_lock_irqsave(&a2_mux_ctx->bam_ch[id].lock, flags);
+		a2_mux_ctx->bam_ch[id].num_tx_pkts++;
+		spin_unlock_irqrestore(&a2_mux_ctx->bam_ch[id].lock, flags);
+	}
+	return 0;
 
-	ret = ipa_disconnect(a2_service_cb.consumer_handle);
-	if (0 != ret) {
-		pr_err("%s: ipa_disconnect failure\n", __func__);
+write_fail3:
+	kfree(pkt);
+write_fail2:
+	if (new_skb)
+		dev_kfree_skb_any(new_skb);
+write_fail:
+	return rc;
+}
+
+/**
+ * a2_mux_add_hdr() - adds the IPv4/IPv6 MUX headers for a
+ *		logical channel to the IPA header table
+ * @lcid: logical channel ID
+ *
+ * Returns: 0 on success, negative on failure
+ */
+static int a2_mux_add_hdr(enum a2_mux_logical_channel_id lcid)
+{
+	struct ipa_ioc_add_hdr *hdrs;
+	struct ipa_hdr_add *ipv4_hdr;
+	struct ipa_hdr_add *ipv6_hdr;
+	struct bam_mux_hdr *dmux_hdr;
+	int rc;
+
+	IPADBG("%s: ch %d\n", __func__, lcid);
+
+	if (lcid < A2_MUX_WWAN_0 || lcid > A2_MUX_WWAN_7) {
+		IPAERR("%s: invalid lcid passed: %d\n", __func__, lcid);
+		return -EINVAL;
+	}
+
+	hdrs = kzalloc(sizeof(struct ipa_ioc_add_hdr) +
+		       2 * sizeof(struct ipa_hdr_add), GFP_KERNEL);
+	if (!hdrs) {
+		IPAERR("%s: hdr allocation fail for ch %d\n", __func__, lcid);
+		return -ENOMEM;
+	}
+
+	ipv4_hdr = &hdrs->hdr[0];
+	ipv6_hdr = &hdrs->hdr[1];
+
+	dmux_hdr = (struct bam_mux_hdr *)ipv4_hdr->hdr;
+	snprintf(ipv4_hdr->name, IPA_RESOURCE_NAME_MAX, "%s%d",
+		 A2_MUX_HDR_NAME_V4_PREF, lcid);
+	dmux_hdr->magic_num = BAM_MUX_HDR_MAGIC_NO;
+	dmux_hdr->cmd = BAM_MUX_HDR_CMD_DATA;
+	dmux_hdr->reserved = 0;
+	dmux_hdr->ch_id = lcid;
+
+	/* Packet length is added by IPA */
+	dmux_hdr->pkt_len = 0;
+	dmux_hdr->pad_len = 0;
+
+	dmux_hdr->magic_num = htons(dmux_hdr->magic_num);
+	IPADBG("converted to network order magic_num=%d\n",
+		    dmux_hdr->magic_num);
+
+	ipv4_hdr->hdr_len = sizeof(struct bam_mux_hdr);
+	ipv4_hdr->is_partial = 0;
+
+	dmux_hdr = (struct bam_mux_hdr *)ipv6_hdr->hdr;
+	snprintf(ipv6_hdr->name, IPA_RESOURCE_NAME_MAX, "%s%d",
+		 A2_MUX_HDR_NAME_V6_PREF, lcid);
+	dmux_hdr->magic_num = BAM_MUX_HDR_MAGIC_NO;
+	dmux_hdr->cmd = BAM_MUX_HDR_CMD_DATA;
+	dmux_hdr->reserved = 0;
+	dmux_hdr->ch_id = lcid;
+
+	/* Packet length is added by IPA */
+	dmux_hdr->pkt_len = 0;
+	dmux_hdr->pad_len = 0;
+
+	dmux_hdr->magic_num = htons(dmux_hdr->magic_num);
+	IPADBG("converted to network order magic_num=%d\n",
+		    dmux_hdr->magic_num);
+
+	ipv6_hdr->hdr_len = sizeof(struct bam_mux_hdr);
+	ipv6_hdr->is_partial = 0;
+
+	hdrs->commit = 1;
+	hdrs->num_hdrs = 2;
+
+	rc = ipa_add_hdr(hdrs);
+	if (rc) {
+		IPAERR("Fail on Header-Insertion(%d)\n", rc);
 		goto bail;
 	}
 
-	ret = ipa_disconnect(a2_service_cb.producer_handle);
-	if (0 != ret) {
-		pr_err("%s: ipa_disconnect failure\n", __func__);
+	if (ipv4_hdr->status) {
+		IPAERR("Fail on Header-Insertion ipv4(%d)\n",
+				ipv4_hdr->status);
+		rc = ipv4_hdr->status;
 		goto bail;
 	}
 
-	ret = 0;
+	if (ipv6_hdr->status) {
+		IPAERR("%s: Fail on Header-Insertion ipv6(%d)\n", __func__,
+				ipv6_hdr->status);
+		rc = ipv6_hdr->status;
+		goto bail;
+	}
 
+	a2_mux_ctx->bam_ch[lcid].v4_hdr_hdl = ipv4_hdr->hdr_hdl;
+	a2_mux_ctx->bam_ch[lcid].v6_hdr_hdl = ipv6_hdr->hdr_hdl;
+
+	rc = 0;
 bail:
+	kfree(hdrs);
+	return rc;
+}
 
+/**
+ * a2_mux_del_hdr() - removes the IPv4/IPv6 MUX headers for a
+ *		logical channel from the IPA header table
+ * @lcid: logical channel ID
+ *
+ * Returns: 0 on success, negative on failure
+ */
+static int a2_mux_del_hdr(enum a2_mux_logical_channel_id lcid)
+{
+	struct ipa_ioc_del_hdr *hdrs;
+	struct ipa_hdr_del *ipv4_hdl;
+	struct ipa_hdr_del *ipv6_hdl;
+	int rc;
+
+	IPADBG("%s: ch %d\n", __func__, lcid);
+
+	if (lcid < A2_MUX_WWAN_0 || lcid > A2_MUX_WWAN_7) {
+		IPAERR("invalid lcid passed: %d\n", lcid);
+		return -EINVAL;
+	}
+
+	hdrs = kzalloc(sizeof(struct ipa_ioc_del_hdr) +
+		       2 * sizeof(struct ipa_hdr_del), GFP_KERNEL);
+	if (!hdrs) {
+		IPAERR("hdr alloc fail for ch %d\n", lcid);
+		return -ENOMEM;
+	}
+
+	ipv4_hdl = &hdrs->hdl[0];
+	ipv6_hdl = &hdrs->hdl[1];
+
+	ipv4_hdl->hdl = a2_mux_ctx->bam_ch[lcid].v4_hdr_hdl;
+	ipv6_hdl->hdl = a2_mux_ctx->bam_ch[lcid].v6_hdr_hdl;
+
+	hdrs->commit = 1;
+	hdrs->num_hdls = 2;
+
+	rc = ipa_del_hdr(hdrs);
+	if (rc) {
+		IPAERR("Fail on Del Header-Insertion(%d)\n", rc);
+		goto bail;
+	}
+
+	if (ipv4_hdl->status) {
+		IPAERR("Fail on Del Header-Insertion ipv4(%d)\n",
+				ipv4_hdl->status);
+		rc = ipv4_hdl->status;
+		goto bail;
+	}
+	a2_mux_ctx->bam_ch[lcid].v4_hdr_hdl = 0;
+
+	if (ipv6_hdl->status) {
+		IPAERR("Fail on Del Header-Insertion ipv6(%d)\n",
+				ipv6_hdl->status);
+		rc = ipv6_hdl->status;
+		goto bail;
+	}
+	a2_mux_ctx->bam_ch[lcid].v6_hdr_hdl = 0;
+
+	rc = 0;
+bail:
+	kfree(hdrs);
+	return rc;
+}
+
+/**
+ * a2_mux_open_channel() - opens logical channel
+ *		to A2
+ * @lcid: logical channel ID
+ * @user_data: user-provided data passed to the notification CB
+ * @notify_cb: user-provided notification CB
+ *
+ * Returns: 0 on success, negative on failure
+ */
+int a2_mux_open_channel(enum a2_mux_logical_channel_id lcid,
+			void *user_data,
+			a2_mux_notify_cb notify_cb)
+{
+	struct bam_mux_hdr *hdr;
+	unsigned long flags;
+	int rc = 0;
+	bool is_connected;
+
+	IPADBG("%s: opening ch %d\n", __func__, lcid);
+	if (!a2_mux_ctx->a2_mux_initialized) {
+		IPAERR("%s: not initialized\n", __func__);
+		return -ENODEV;
+	}
+	if (lcid >= A2_MUX_NUM_CHANNELS || lcid < 0) {
+		IPAERR("%s: invalid channel id %d\n", __func__, lcid);
+		return -EINVAL;
+	}
+	if (notify_cb == NULL) {
+		IPAERR("%s: notify function is NULL\n", __func__);
+		return -EINVAL;
+	}
+	spin_lock_irqsave(&a2_mux_ctx->bam_ch[lcid].lock, flags);
+	if (bam_ch_is_open(lcid)) {
+		IPAERR("%s: Already opened %d\n", __func__, lcid);
+		spin_unlock_irqrestore(&a2_mux_ctx->bam_ch[lcid].lock, flags);
+		goto open_done;
+	}
+	if (!bam_ch_is_remote_open(lcid)) {
+		IPAERR("%s: Remote not open; ch: %d\n", __func__, lcid);
+		spin_unlock_irqrestore(&a2_mux_ctx->bam_ch[lcid].lock, flags);
+		return -ENODEV;
+	}
+	a2_mux_ctx->bam_ch[lcid].notify_cb = notify_cb;
+	a2_mux_ctx->bam_ch[lcid].user_data = user_data;
+	a2_mux_ctx->bam_ch[lcid].status |= BAM_CH_LOCAL_OPEN;
+	a2_mux_ctx->bam_ch[lcid].num_tx_pkts = 0;
+	a2_mux_ctx->bam_ch[lcid].use_wm = 0;
+	spin_unlock_irqrestore(&a2_mux_ctx->bam_ch[lcid].lock, flags);
+	read_lock(&a2_mux_ctx->ul_wakeup_lock);
+	is_connected = a2_mux_ctx->bam_is_connected;
+	read_unlock(&a2_mux_ctx->ul_wakeup_lock);
+	if (!is_connected)
+		return -ENODEV;
+	if (lcid != A2_MUX_TETHERED_0) {
+		hdr = kmalloc(sizeof(struct bam_mux_hdr), GFP_KERNEL);
+		if (hdr == NULL) {
+			IPAERR("%s: hdr kmalloc failed. ch: %d\n",
+			       __func__, lcid);
+			return -ENOMEM;
+		}
+		hdr->magic_num = BAM_MUX_HDR_MAGIC_NO;
+		if (a2_mux_ctx->a2_mux_apps_pc_enabled) {
+			hdr->cmd = BAM_MUX_HDR_CMD_OPEN;
+		} else {
+			IPAERR("%s: PC DISABLED BY A5 SW BY INTENTION\n",
+					__func__);
+			a2_mux_ctx->a2_pc_disabled = 1;
+			hdr->cmd = BAM_MUX_HDR_CMD_OPEN_NO_A2_PC;
+		}
+		hdr->reserved = 0;
+		hdr->ch_id = lcid;
+		hdr->pkt_len = 0;
+		hdr->pad_len = 0;
+		hdr->magic_num = htons(hdr->magic_num);
+		hdr->pkt_len = htons(hdr->pkt_len);
+		IPADBG("convert to network order magic_num=%d, pkt_len=%d\n",
+		    hdr->magic_num, hdr->pkt_len);
+		rc = bam_mux_write_cmd((void *)hdr,
+				       sizeof(struct bam_mux_hdr));
+		if (rc) {
+			IPAERR("%s: bam_mux_write_cmd failed %d; ch: %d\n",
+			       __func__, rc, lcid);
+			kfree(hdr);
+			return rc;
+		}
+		rc = a2_mux_add_hdr(lcid);
+		if (rc) {
+			IPAERR("a2_mux_add_hdr failed %d; ch: %d\n",
+			       rc, lcid);
+			return rc;
+		}
+	}
+
+open_done:
+	IPADBG("%s: opened ch %d\n", __func__, lcid);
+	return rc;
+}
+
+/**
+ * a2_mux_close_channel() - closes logical channel
+ *		to A2
+ * @lcid: logical channel ID
+ *
+ * Returns: 0 on success, negative on failure
+ */
+int a2_mux_close_channel(enum a2_mux_logical_channel_id lcid)
+{
+	struct bam_mux_hdr *hdr;
+	unsigned long flags;
+	int rc = 0;
+	bool is_connected;
+
+	if (lcid >= A2_MUX_NUM_CHANNELS || lcid < 0)
+		return -EINVAL;
+	IPADBG("%s: closing ch %d\n", __func__, lcid);
+	if (!a2_mux_ctx->a2_mux_initialized)
+		return -ENODEV;
+	read_lock(&a2_mux_ctx->ul_wakeup_lock);
+	is_connected = a2_mux_ctx->bam_is_connected;
+	read_unlock(&a2_mux_ctx->ul_wakeup_lock);
+	if (!is_connected && !bam_ch_is_in_reset(lcid))
+		return -ENODEV;
+	spin_lock_irqsave(&a2_mux_ctx->bam_ch[lcid].lock, flags);
+	a2_mux_ctx->bam_ch[lcid].notify_cb = NULL;
+	a2_mux_ctx->bam_ch[lcid].user_data = NULL;
+	a2_mux_ctx->bam_ch[lcid].status &= ~BAM_CH_LOCAL_OPEN;
+	spin_unlock_irqrestore(&a2_mux_ctx->bam_ch[lcid].lock, flags);
+	if (bam_ch_is_in_reset(lcid)) {
+		a2_mux_ctx->bam_ch[lcid].status &= ~BAM_CH_IN_RESET;
+		return 0;
+	}
+	if (lcid != A2_MUX_TETHERED_0) {
+		hdr = kmalloc(sizeof(struct bam_mux_hdr), GFP_ATOMIC);
+		if (hdr == NULL) {
+			IPAERR("%s: hdr kmalloc failed. ch: %d\n",
+			       __func__, lcid);
+			return -ENOMEM;
+		}
+		hdr->magic_num = BAM_MUX_HDR_MAGIC_NO;
+		hdr->cmd = BAM_MUX_HDR_CMD_CLOSE;
+		hdr->reserved = 0;
+		hdr->ch_id = lcid;
+		hdr->pkt_len = 0;
+		hdr->pad_len = 0;
+		hdr->magic_num = htons(hdr->magic_num);
+		hdr->pkt_len = htons(hdr->pkt_len);
+		IPADBG("convert to network order magic_num=%d, pkt_len=%d\n",
+		    hdr->magic_num, hdr->pkt_len);
+		rc = bam_mux_write_cmd((void *)hdr, sizeof(struct bam_mux_hdr));
+		if (rc) {
+			IPAERR("%s: bam_mux_write_cmd failed %d; ch: %d\n",
+			       __func__, rc, lcid);
+			kfree(hdr);
+			return rc;
+		}
+
+		rc = a2_mux_del_hdr(lcid);
+		if (rc) {
+			IPAERR("a2_mux_del_hdr failed %d; ch: %d\n",
+			       rc, lcid);
+			return rc;
+		}
+	}
+	IPADBG("%s: closed ch %d\n", __func__, lcid);
+	return 0;
+}
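
Taken together, a2_mux_open_channel(), a2_mux_write() and a2_mux_close_channel() form the client-facing API of this file. A minimal in-kernel sketch of a caller follows; it is illustrative only: my_a2_notify and my_a2_send_once are hypothetical names, and the callback prototype and the a2_mux_event_type enum name are assumed to match the declarations in mach/ipa.h.

#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <mach/ipa.h>

/* events and skbs are delivered through the callback registered at open */
static void my_a2_notify(void *user_data, enum a2_mux_event_type event,
			 unsigned long data)
{
	struct sk_buff *skb = (struct sk_buff *)data;

	switch (event) {
	case A2_MUX_RECEIVE:		/* skb received from A2 */
	case A2_MUX_WRITE_DONE:		/* our tx skb handed back */
		dev_kfree_skb_any(skb);
		break;
	default:
		break;
	}
}

static int my_a2_send_once(void)
{
	struct sk_buff *skb;
	int rc;

	rc = a2_mux_open_channel(A2_MUX_WWAN_0, NULL, my_a2_notify);
	if (rc)
		return rc;
	skb = alloc_skb(128, GFP_KERNEL);
	if (!skb) {
		a2_mux_close_channel(A2_MUX_WWAN_0);
		return -ENOMEM;
	}
	skb_reserve(skb, 8);			/* headroom for the 8-byte MUX header */
	memset(skb_put(skb, 64), 0, 64);	/* dummy, 4-byte aligned payload */
	rc = a2_mux_write(A2_MUX_WWAN_0, skb);
	if (rc)
		pr_err("a2_mux_write failed %d\n", rc);
	/* on success the skb is handed back via A2_MUX_WRITE_DONE */
	return a2_mux_close_channel(A2_MUX_WWAN_0);
}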
+
+/**
+ * a2_mux_is_ch_full() - checks if channel is above predefined WM,
+ *		used for flow control implementation
+ * @lcid: logical channel ID
+ *
+ * Returns: true if the channel is above the predefined WM,
+ *		false otherwise; negative on failure
+ */
+int a2_mux_is_ch_full(enum a2_mux_logical_channel_id lcid)
+{
+	unsigned long flags;
+	int ret;
+
+	if (lcid >= A2_MUX_NUM_CHANNELS ||
+			lcid < 0)
+		return -EINVAL;
+	if (!a2_mux_ctx->a2_mux_initialized)
+		return -ENODEV;
+	spin_lock_irqsave(&a2_mux_ctx->bam_ch[lcid].lock, flags);
+	a2_mux_ctx->bam_ch[lcid].use_wm = 1;
+	ret = a2_mux_ctx->bam_ch[lcid].num_tx_pkts >= HIGH_WATERMARK;
+	IPADBG("%s: ch %d num tx pkts=%d, HWM=%d\n", __func__,
+	     lcid, a2_mux_ctx->bam_ch[lcid].num_tx_pkts, ret);
+	if (!bam_ch_is_local_open(lcid)) {
+		ret = -ENODEV;
+		IPAERR("%s: port not open: %d\n", __func__,
+		       a2_mux_ctx->bam_ch[lcid].status);
+	}
+	spin_unlock_irqrestore(&a2_mux_ctx->bam_ch[lcid].lock, flags);
 	return ret;
 }
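
a2_mux_is_ch_full() above and a2_mux_is_ch_low() below are intended to be paired in a transmit path, as the wwan driver earlier in this patch does: stop queueing once the high watermark is reported, and resume from the write-done path once the channel drains below the low watermark. A condensed, illustrative sketch of that pattern (my_lcid, my_try_xmit and my_on_write_done are placeholders):

#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <mach/ipa.h>

static const enum a2_mux_logical_channel_id my_lcid = A2_MUX_WWAN_0;

static int my_try_xmit(struct net_device *dev, struct sk_buff *skb)
{
	int rc = a2_mux_write(my_lcid, skb);

	if (rc == -EAGAIN) {
		/* already past the high watermark: stop and retry later */
		netif_stop_queue(dev);
		return NETDEV_TX_BUSY;
	}
	/* throttle as soon as the channel reports it is above the high WM */
	if (!rc && a2_mux_is_ch_full(my_lcid) > 0)
		netif_stop_queue(dev);
	return rc;
}

/* called from the A2_MUX_WRITE_DONE notification */
static void my_on_write_done(struct net_device *dev)
{
	if (netif_queue_stopped(dev) && a2_mux_is_ch_low(my_lcid) > 0)
		netif_wake_queue(dev);
}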
 
 /**
- * a2_mux_open_port() - open connection to A2
- * @wwan_logical_channel_id:	 WWAN logical channel ID
- * @rx_cb:	Rx callback
- * @tx_complete_cb:	Tx completed callback
+ * a2_mux_is_ch_low() - checks whether a channel is below the predefined
+ *		low watermark, used for flow control
+ * @lcid: logical channel ID
  *
- * Return codes:
- * 0: success
- * -EINVAL: invalid parameters
+ * Returns: true if the channel is below the low watermark,
+ *		false otherwise, negative on failure
  */
-int a2_mux_open_port(int wwan_logical_channel_id, void *rx_cb,
-		void *tx_complete_cb)
+int a2_mux_is_ch_low(enum a2_mux_logical_channel_id lcid)
 {
-	int ret = 0;
-	u8 src_pipe = 0;
-	u8 dst_pipe = 0;
-	struct sps_pipe *a2_to_ipa_pipe = NULL;
-	struct sps_pipe *ipa_to_a2_pipe = NULL;
-
-	(void) wwan_logical_channel_id;
-
-	a2_service_cb.rx_cb = rx_cb;
-	a2_service_cb.tx_complete_cb = tx_complete_cb;
-
-	ret = connect_pipe_ipa(A2_TO_IPA,
-			&src_pipe,
-			&(a2_service_cb.consumer_handle),
-			a2_to_ipa_pipe);
-	if (ret) {
-		pr_err("%s: A2 to IPA pipe connection failure\n", __func__);
-		goto bail;
-	}
-
-	ret = connect_pipe_ipa(IPA_TO_A2,
-			&dst_pipe,
-			&(a2_service_cb.producer_handle),
-			ipa_to_a2_pipe);
-	if (ret) {
-		pr_err("%s: IPA to A2 pipe connection failure\n", __func__);
-		sps_disconnect(a2_to_ipa_pipe);
-		sps_free_endpoint(a2_to_ipa_pipe);
-		(void) ipa_disconnect(a2_service_cb.consumer_handle);
-		goto bail;
-	}
-
-	ret = 0;
-
-bail:
-
-	return ret;
-}
-
-static int connect_pipe_ipa(enum a2_mux_pipe_direction pipe_dir,
-			u8 *usb_pipe_idx,
-			u32 *clnt_hdl,
-			struct sps_pipe *pipe)
-{
+	unsigned long flags;
 	int ret;
-	struct sps_connect connection = {0, };
-	u32 a2_handle = 0;
-	u32 a2_phy_addr = 0;
-	struct a2_mux_pipe_connection pipe_connection = { 0, };
-	struct ipa_connect_params ipa_in_params;
-	struct ipa_sps_params sps_out_params;
 
-	memset(&ipa_in_params, 0, sizeof(ipa_in_params));
-	memset(&sps_out_params, 0, sizeof(sps_out_params));
-
-	if (!usb_pipe_idx || !clnt_hdl) {
-		pr_err("connect_pipe_ipa :: null arguments\n");
-		ret = -EINVAL;
-		goto bail;
+	if (lcid >= A2_MUX_NUM_CHANNELS ||
+			lcid < 0)
+		return -EINVAL;
+	if (!a2_mux_ctx->a2_mux_initialized)
+		return -ENODEV;
+	spin_lock_irqsave(&a2_mux_ctx->bam_ch[lcid].lock, flags);
+	a2_mux_ctx->bam_ch[lcid].use_wm = 1;
+	ret = a2_mux_ctx->bam_ch[lcid].num_tx_pkts <= LOW_WATERMARK;
+	IPADBG("%s: ch %d num tx pkts=%d, LWM=%d\n", __func__,
+	     lcid, a2_mux_ctx->bam_ch[lcid].num_tx_pkts, ret);
+	if (!bam_ch_is_local_open(lcid)) {
+		ret = -ENODEV;
+		IPAERR("%s: port not open: %d\n", __func__,
+		       a2_mux_ctx->bam_ch[lcid].status);
 	}
-
-	ret = ipa_get_a2_mux_pipe_info(pipe_dir, &pipe_connection);
-	if (ret) {
-		pr_err("ipa_get_a2_mux_pipe_info failed\n");
-		goto bail;
-	}
-
-	if (pipe_dir == A2_TO_IPA) {
-		a2_phy_addr = pipe_connection.src_phy_addr;
-		ipa_in_params.client = IPA_CLIENT_A2_TETHERED_PROD;
-		ipa_in_params.ipa_ep_cfg.mode.mode = IPA_DMA;
-		ipa_in_params.ipa_ep_cfg.mode.dst = IPA_CLIENT_USB_CONS;
-		pr_err("-*&- pipe_connection->src_pipe_index = %d\n",
-				pipe_connection.src_pipe_index);
-		ipa_in_params.client_ep_idx = pipe_connection.src_pipe_index;
-	} else {
-		a2_phy_addr = pipe_connection.dst_phy_addr;
-		ipa_in_params.client = IPA_CLIENT_A2_TETHERED_CONS;
-		ipa_in_params.client_ep_idx = pipe_connection.dst_pipe_index;
-	}
-
-	ret = sps_phy2h(a2_phy_addr, &a2_handle);
-	if (ret) {
-		pr_err("%s: sps_phy2h failed (A2 BAM) %d\n", __func__, ret);
-		goto bail;
-	}
-
-	ipa_in_params.client_bam_hdl = a2_handle;
-	ipa_in_params.desc_fifo_sz = pipe_connection.desc_fifo_size;
-	ipa_in_params.data_fifo_sz = pipe_connection.data_fifo_size;
-
-	if (pipe_connection.mem_type == IPA_SPS_PIPE_MEM) {
-		pr_debug("%s: A2 BAM using SPS pipe memory\n", __func__);
-		ret = sps_setup_bam2bam_fifo(&data_mem_buf[pipe_dir],
-				pipe_connection.data_fifo_base_offset,
-				pipe_connection.data_fifo_size, 1);
-		if (ret) {
-			pr_err("%s: data fifo setup failure %d\n",
-					__func__, ret);
-			goto bail;
-		}
-
-		ret = sps_setup_bam2bam_fifo(&desc_mem_buf[pipe_dir],
-				pipe_connection.desc_fifo_base_offset,
-				pipe_connection.desc_fifo_size, 1);
-		if (ret) {
-			pr_err("%s: desc. fifo setup failure %d\n",
-					__func__, ret);
-			goto bail;
-		}
-
-		ipa_in_params.data = data_mem_buf[pipe_dir];
-		ipa_in_params.desc = desc_mem_buf[pipe_dir];
-	}
-
-	ret = a2_ipa_connect_pipe(&ipa_in_params,
-			&sps_out_params,
-			clnt_hdl);
-	if (ret) {
-		pr_err("-**- USB-IPA info: ipa_connect failed\n");
-		pr_err("%s: usb_ipa_connect_pipe failed\n", __func__);
-		goto bail;
-	}
-
-	pipe = sps_alloc_endpoint();
-	if (pipe == NULL) {
-		pr_err("%s: sps_alloc_endpoint failed\n", __func__);
-		ret = -ENOMEM;
-		goto a2_ipa_connect_pipe_failed;
-	}
-
-	ret = sps_get_config(pipe, &connection);
-	if (ret) {
-		pr_err("%s: tx get config failed %d\n", __func__, ret);
-		goto get_config_failed;
-	}
-
-	if (pipe_dir == A2_TO_IPA) {
-		connection.mode = SPS_MODE_SRC;
-		*usb_pipe_idx = connection.src_pipe_index;
-		connection.source = a2_handle;
-		connection.destination = sps_out_params.ipa_bam_hdl;
-		connection.src_pipe_index = pipe_connection.src_pipe_index;
-		connection.dest_pipe_index = sps_out_params.ipa_ep_idx;
-	} else {
-		connection.mode = SPS_MODE_DEST;
-		*usb_pipe_idx = connection.dest_pipe_index;
-		connection.source = sps_out_params.ipa_bam_hdl;
-		connection.destination = a2_handle;
-		connection.src_pipe_index = sps_out_params.ipa_ep_idx;
-		connection.dest_pipe_index = pipe_connection.dst_pipe_index;
-	}
-
-	connection.event_thresh = 16;
-	connection.data = sps_out_params.data;
-	connection.desc = sps_out_params.desc;
-
-	ret = sps_connect(pipe, &connection);
-	if (ret < 0) {
-		pr_err("%s: tx connect error %d\n", __func__, ret);
-		goto error;
-	}
-
-	ret = 0;
-	goto bail;
-error:
-	sps_disconnect(pipe);
-get_config_failed:
-	sps_free_endpoint(pipe);
-a2_ipa_connect_pipe_failed:
-	(void) ipa_disconnect(*clnt_hdl);
-bail:
+	spin_unlock_irqrestore(&a2_mux_ctx->bam_ch[lcid].lock, flags);
 	return ret;
 }
 
-static int a2_ipa_connect_pipe(struct ipa_connect_params *in_params,
-		struct ipa_sps_params *out_params, u32 *clnt_hdl)
+static int a2_mux_initialize_context(int handle)
 {
-	return ipa_connect(in_params, out_params, clnt_hdl);
+	int i;
+
+	a2_mux_ctx->a2_mux_apps_pc_enabled = 1;
+	a2_mux_ctx->a2_device_handle = handle;
+	INIT_WORK(&a2_mux_ctx->kickoff_ul_wakeup, kickoff_ul_wakeup_func);
+	INIT_WORK(&a2_mux_ctx->kickoff_ul_power_down,
+		  kickoff_ul_power_down_func);
+	INIT_WORK(&a2_mux_ctx->kickoff_ul_request_resource,
+		  kickoff_ul_request_resource_func);
+	INIT_LIST_HEAD(&a2_mux_ctx->bam_tx_pool);
+	spin_lock_init(&a2_mux_ctx->bam_tx_pool_spinlock);
+	mutex_init(&a2_mux_ctx->wakeup_lock);
+	rwlock_init(&a2_mux_ctx->ul_wakeup_lock);
+	spin_lock_init(&a2_mux_ctx->wakelock_reference_lock);
+	a2_mux_ctx->disconnect_ack = 1;
+	mutex_init(&a2_mux_ctx->smsm_cb_lock);
+	for (i = 0; i < A2_MUX_NUM_CHANNELS; ++i)
+		spin_lock_init(&a2_mux_ctx->bam_ch[i].lock);
+	init_completion(&a2_mux_ctx->ul_wakeup_ack_completion);
+	init_completion(&a2_mux_ctx->bam_connection_completion);
+	init_completion(&a2_mux_ctx->request_resource_completion);
+	wake_lock_init(&a2_mux_ctx->bam_wakelock,
+		       WAKE_LOCK_SUSPEND, "a2_mux_wakelock");
+	a2_mux_ctx->a2_mux_initialized = 1;
+	a2_mux_ctx->a2_mux_send_power_vote_on_init_once = 1;
+	a2_mux_ctx->a2_mux_tx_workqueue =
+		create_singlethread_workqueue("a2_mux_tx");
+	if (!a2_mux_ctx->a2_mux_tx_workqueue) {
+		IPAERR("%s: a2_mux_tx_workqueue alloc failed\n",
+		       __func__);
+		return -ENOMEM;
+	}
+	return 0;
 }
 
+/**
+ * a2_mux_init() - initialize A2 MUX component
+ *
+ * Returns: 0 on success, negative otherwise
+ */
+int a2_mux_init(void)
+{
+	int rc;
+	u32 h;
+	void *a2_virt_addr;
+	u32 a2_bam_mem_base;
+	u32 a2_bam_mem_size;
+	u32 a2_bam_irq;
+	struct sps_bam_props a2_props;
+
+
+	IPADBG("%s A2 MUX\n", __func__);
+	rc = ipa_get_a2_mux_bam_info(&a2_bam_mem_base,
+				     &a2_bam_mem_size,
+				     &a2_bam_irq);
+	if (rc) {
+		IPAERR("%s: ipa_get_a2_mux_bam_info failed\n", __func__);
+		rc = -EFAULT;
+		goto bail;
+	}
+	a2_virt_addr = ioremap_nocache((unsigned long)(a2_bam_mem_base),
+							a2_bam_mem_size);
+	if (!a2_virt_addr) {
+		IPAERR("%s: ioremap failed\n", __func__);
+		rc = -ENOMEM;
+		goto bail;
+	}
+	memset(&a2_props, 0, sizeof(a2_props));
+	a2_props.phys_addr		= a2_bam_mem_base;
+	a2_props.virt_addr		= a2_virt_addr;
+	a2_props.virt_size		= a2_bam_mem_size;
+	a2_props.irq			= a2_bam_irq;
+	a2_props.options		= SPS_BAM_OPT_IRQ_WAKEUP;
+	a2_props.num_pipes		= A2_NUM_PIPES;
+	a2_props.summing_threshold	= A2_SUMMING_THRESHOLD;
+	/* need to free on tear down */
+	rc = sps_register_bam_device(&a2_props, &h);
+	if (rc < 0) {
+		IPAERR("%s: register bam error %d\n", __func__, rc);
+		goto register_bam_failed;
+	}
+	a2_mux_ctx = kzalloc(sizeof(*a2_mux_ctx), GFP_KERNEL);
+	if (!a2_mux_ctx) {
+		IPAERR("%s: a2_mux_ctx alloc failed, rc: %d\n", __func__, rc);
+		rc = -ENOMEM;
+		goto register_bam_failed;
+	}
+	rc = a2_mux_initialize_context(h);
+	if (rc) {
+		IPAERR("%s: a2_mux_initialize_context failed, rc: %d\n",
+		       __func__, rc);
+		goto ctx_alloc_failed;
+	}
+	rc = a2_mux_pm_initialize_rm();
+	if (rc) {
+		IPAERR("%s: a2_mux_pm_initialize_rm failed, rc: %d\n",
+		       __func__, rc);
+		goto ctx_alloc_failed;
+	}
+	rc = smsm_state_cb_register(SMSM_MODEM_STATE, SMSM_A2_POWER_CONTROL,
+					bam_dmux_smsm_cb, NULL);
+	if (rc) {
+		IPAERR("%s: smsm cb register failed, rc: %d\n", __func__, rc);
+		rc = -ENOMEM;
+		goto ctx_alloc_failed;
+	}
+	rc = smsm_state_cb_register(SMSM_MODEM_STATE,
+				    SMSM_A2_POWER_CONTROL_ACK,
+				    bam_dmux_smsm_ack_cb, NULL);
+	if (rc) {
+		IPAERR("%s: smsm ack cb register failed, rc: %d\n",
+		       __func__, rc);
+		rc = -ENOMEM;
+		goto smsm_ack_cb_reg_failed;
+	}
+	if (smsm_get_state(SMSM_MODEM_STATE) & SMSM_A2_POWER_CONTROL)
+		bam_dmux_smsm_cb(NULL, 0, smsm_get_state(SMSM_MODEM_STATE));
+
+	/*
+	 * Set remote channel open for tethered channel since there is
+	 *  no actual remote tethered channel
+	 */
+	a2_mux_ctx->bam_ch[A2_MUX_TETHERED_0].status |= BAM_CH_REMOTE_OPEN;
+
+	rc = 0;
+	goto bail;
+
+smsm_ack_cb_reg_failed:
+	smsm_state_cb_deregister(SMSM_MODEM_STATE,
+				SMSM_A2_POWER_CONTROL,
+				bam_dmux_smsm_cb, NULL);
+ctx_alloc_failed:
+	kfree(a2_mux_ctx);
+register_bam_failed:
+	iounmap(a2_virt_addr);
+bail:
+	return rc;
+}
+
+/**
+ * a2_mux_exit() - destroy A2 MUX component
+ *
+ * Returns: 0 on success, negative otherwise
+ */
+int a2_mux_exit(void)
+{
+	smsm_state_cb_deregister(SMSM_MODEM_STATE,
+			SMSM_A2_POWER_CONTROL_ACK,
+			bam_dmux_smsm_ack_cb,
+			NULL);
+	smsm_state_cb_deregister(SMSM_MODEM_STATE,
+				SMSM_A2_POWER_CONTROL,
+				bam_dmux_smsm_cb,
+				NULL);
+	if (a2_mux_ctx->a2_mux_tx_workqueue)
+		destroy_workqueue(a2_mux_ctx->a2_mux_tx_workqueue);
+	return 0;
+}
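
The channel API above is consumed by higher-level drivers such as the tethering bridge. A minimal usage sketch follows (illustrative only, not part of this patch); it assumes the channel was opened earlier through the a2_mux_open_channel() path and that the API is declared in an A2 MUX header, whose name below is an assumption:

	#include <linux/kernel.h>
	#include "a2_mux.h"	/* assumed name of the header declaring this API */

	/*
	 * Illustrative only: uses the watermark helpers for flow control
	 * and closes the channel when done.
	 */
	static void example_channel_teardown(enum a2_mux_logical_channel_id lcid)
	{
		int rc;

		/* hold tx above HIGH_WATERMARK, resume below LOW_WATERMARK */
		if (a2_mux_is_ch_full(lcid) > 0)
			pr_debug("ch %d above high watermark, hold tx\n", lcid);
		else if (a2_mux_is_ch_low(lcid) > 0)
			pr_debug("ch %d below low watermark, resume tx\n", lcid);

		rc = a2_mux_close_channel(lcid);
		if (rc)
			pr_err("a2_mux_close_channel(%d) failed: %d\n", lcid, rc);
	}
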
diff --git a/drivers/platform/msm/ipa/ipa.c b/drivers/platform/msm/ipa/ipa.c
index 7690b21..edf3a60 100644
--- a/drivers/platform/msm/ipa/ipa.c
+++ b/drivers/platform/msm/ipa/ipa.c
@@ -26,6 +26,7 @@
 #include <mach/msm_bus.h>
 #include <mach/msm_bus_board.h>
 #include "ipa_i.h"
+#include "ipa_rm_i.h"
 
 #define IPA_SUMMING_THRESHOLD (0x10)
 #define IPA_PIPE_MEM_START_OFST (0x0)
@@ -805,7 +806,7 @@
 
 		/* check all the system pipes for tx comp and rx avail */
 		if (ipa_ctx->sys[IPA_A5_LAN_WAN_IN].ep->valid)
-			cnt |= ipa_handle_rx_core(false);
+			cnt |= ipa_handle_rx_core(false, true);
 
 		for (i = 0; i < num_tx_pipes; i++)
 			if (ipa_ctx->sys[tx_pipes[i]].ep->valid)
@@ -1068,6 +1069,33 @@
 	return 0;
 }
 
+/**
+* ipa_get_a2_mux_bam_info() - Exposes A2 parameters fetched from
+* DTS
+*
+* @a2_bam_mem_base: A2 BAM Memory base
+* @a2_bam_mem_size: A2 BAM Memory size
+* @a2_bam_irq: A2 BAM IRQ
+*
+* Return codes:
+* 0: success
+* -EFAULT: invalid parameters
+*/
+int ipa_get_a2_mux_bam_info(u32 *a2_bam_mem_base, u32 *a2_bam_mem_size,
+			    u32 *a2_bam_irq)
+{
+	if (!a2_bam_mem_base || !a2_bam_mem_size || !a2_bam_irq) {
+		IPAERR("ipa_get_a2_mux_bam_info null args\n");
+		return -EFAULT;
+	}
+
+	*a2_bam_mem_base = ipa_res.a2_bam_mem_base;
+	*a2_bam_mem_size = ipa_res.a2_bam_mem_size;
+	*a2_bam_irq = ipa_res.a2_bam_irq;
+
+	return 0;
+}
+
 static void ipa_set_aggregation_params(void)
 {
 	struct ipa_ep_cfg_aggr agg_params;
@@ -1416,7 +1444,8 @@
 {
 	void *bam_cnfg_bits;
 
-	if (ipa_ctx->ipa_hw_type == IPA_HW_v1_0) {
+	if ((ipa_ctx->ipa_hw_type == IPA_HW_v1_0) ||
+	    (ipa_ctx->ipa_hw_type == IPA_HW_v1_1)) {
 		bam_cnfg_bits = ioremap(res->ipa_mem_base +
 						IPA_BAM_REG_BASE_OFST,
 					IPA_BAM_REMAP_SIZE);
@@ -1528,6 +1557,7 @@
 * - Create empty routing table in system memory(no committing)
 * - Initialize pipes memory pool with ipa_pipe_mem_init for supported platforms
 * - Create a char-device for IPA
+* - Initialize IPA RM (resource manager)
 */
 static int ipa_init(const struct ipa_plat_drv_res *resource_p)
 {
@@ -1548,6 +1578,10 @@
 
 	IPADBG("polling_mode=%u delay_ms=%u\n", polling_mode, polling_delay_ms);
 	ipa_ctx->polling_mode = polling_mode;
+	if (ipa_ctx->polling_mode)
+		atomic_set(&ipa_ctx->curr_polling_state, 1);
+	else
+		atomic_set(&ipa_ctx->curr_polling_state, 0);
 	IPADBG("hdr_lcl=%u ip4_rt=%u ip6_rt=%u ip4_flt=%u ip6_flt=%u\n",
 	       hdr_tbl_lcl, ip4_rt_tbl_lcl, ip6_rt_tbl_lcl, ip4_flt_tbl_lcl,
 	       ip6_flt_tbl_lcl);
@@ -1866,6 +1900,24 @@
 	ipa_ctx->aggregation_byte_limit = 1;
 	ipa_ctx->aggregation_time_limit = 0;
 
+	/* Initialize IPA RM (resource manager) */
+	result = ipa_rm_initialize();
+	if (result) {
+		IPAERR(":ipa_rm_initialize err=%d\n", -result);
+		result = -ENODEV;
+		goto fail_ipa_rm_init;
+	}
+
+	a2_mux_init();
+
+	/* Initialize the tethering bridge driver */
+	result = teth_bridge_driver_init();
+	if (result) {
+		IPAERR(":teth_bridge_driver_init() failed\n");
+		result = -ENODEV;
+		goto fail_cdev_add;
+	}
+
 	/* gate IPA clocks */
 	if (ipa_ctx->ipa_hw_mode == IPA_HW_MODE_NORMAL)
 		ipa_disable_clks();
@@ -1874,6 +1926,8 @@
 
 	return 0;
 
+fail_ipa_rm_init:
+	cdev_del(&ipa_ctx->cdev);
 fail_cdev_add:
 	device_destroy(ipa_ctx->class, ipa_ctx->dev_num);
 fail_device_create:
@@ -1981,6 +2035,18 @@
 		ipa_res.bam_mem_size = resource_size(resource_p);
 	}
 
+	/* Get IPA A2 BAM address */
+	resource_p = platform_get_resource_byname(pdev_p, IORESOURCE_MEM,
+			"a2-bam-base");
+
+	if (!resource_p) {
+		IPAERR(":get resource failed for a2-bam-base!\n");
+		return -ENODEV;
+	} else {
+		ipa_res.a2_bam_mem_base = resource_p->start;
+		ipa_res.a2_bam_mem_size = resource_size(resource_p);
+	}
+
 	/* Get IPA pipe mem start ofst */
 	resource_p = platform_get_resource_byname(pdev_p, IORESOURCE_MEM,
 			"ipa-pipe-mem");
@@ -2014,6 +2080,17 @@
 		ipa_res.bam_irq = resource_p->start;
 	}
 
+	/* Get IPA A2 BAM IRQ number */
+	resource_p = platform_get_resource_byname(pdev_p, IORESOURCE_IRQ,
+			"a2-bam-irq");
+
+	if (!resource_p) {
+		IPAERR(":get resource failed for a2-bam-irq!\n");
+		return -ENODEV;
+	} else {
+		ipa_res.a2_bam_irq = resource_p->start;
+	}
+
 	/* Get IPA HW Version */
 	result = of_property_read_u32(pdev_p->dev.of_node, "qcom,ipa-hw-ver",
 					&ipa_res.ipa_hw_type);
diff --git a/drivers/platform/msm/ipa/ipa_bridge.c b/drivers/platform/msm/ipa/ipa_bridge.c
index 56e9b0d..0227ee4 100644
--- a/drivers/platform/msm/ipa/ipa_bridge.c
+++ b/drivers/platform/msm/ipa/ipa_bridge.c
@@ -42,9 +42,6 @@
 	struct sps_connect connection;
 	struct sps_mem_buffer desc_mem_buf;
 	struct sps_register_event register_event;
-	spinlock_t spinlock;
-	u32 len;
-	u32 free_len;
 	struct list_head free_desc_list;
 };
 
@@ -162,12 +159,10 @@
 		goto fail_dma;
 	}
 
-	info->len = ~0;
-
 	list_add_tail(&info->link, &sys_rx->head_desc_list);
 	ret = sps_transfer_one(sys_rx->pipe, info->dma_address,
 			       IPA_RX_SKB_SIZE, info,
-			       SPS_IOVEC_FLAG_INT | SPS_IOVEC_FLAG_EOT);
+			       SPS_IOVEC_FLAG_INT);
 	if (ret) {
 		list_del(&info->link);
 		dma_unmap_single(NULL, info->dma_address, IPA_RX_SKB_SIZE,
@@ -176,7 +171,6 @@
 				type, dir);
 		goto fail_dma;
 	}
-	sys_rx->len++;
 	return 0;
 
 fail_dma:
@@ -206,9 +200,6 @@
 						  link);
 			list_move_tail(&tx_pkt->link,
 					&sys_tx->free_desc_list);
-			sys_tx->len--;
-			sys_tx->free_len++;
-			tx_pkt->len = ~0;
 			cnt++;
 		}
 	} while (all);
@@ -245,7 +236,6 @@
 						  struct ipa_pkt_info,
 						  link);
 			list_del(&rx_pkt->link);
-			sys_rx->len--;
 			rx_pkt->len = iov.size;
 
 retry_alloc_tx:
@@ -285,15 +275,12 @@
 
 				list_add_tail(&tmp_pkt->link,
 						&sys_tx->free_desc_list);
-				sys_tx->free_len++;
-				tmp_pkt->len = ~0;
 			}
 
 			tx_pkt = list_first_entry(&sys_tx->free_desc_list,
 						  struct ipa_pkt_info,
 						  link);
 			list_del(&tx_pkt->link);
-			sys_tx->free_len--;
 
 retry_add_rx:
 			list_add_tail(&tx_pkt->link,
@@ -302,8 +289,7 @@
 					tx_pkt->dma_address,
 					IPA_RX_SKB_SIZE,
 					tx_pkt,
-					SPS_IOVEC_FLAG_INT |
-					SPS_IOVEC_FLAG_EOT);
+					SPS_IOVEC_FLAG_INT);
 			if (ret) {
 				list_del(&tx_pkt->link);
 				pr_debug_ratelimited("%s: sps_transfer_one failed %d type=%d dir=%d\n",
@@ -312,7 +298,6 @@
 						polling_max_sleep[dir]);
 				goto retry_add_rx;
 			}
-			sys_rx->len++;
 
 retry_add_tx:
 			list_add_tail(&rx_pkt->link,
@@ -332,7 +317,6 @@
 						polling_max_sleep[dir]);
 				goto retry_add_tx;
 			}
-			sys_tx->len++;
 			IPA_STATS_INC_BRIDGE_CNT(ctx->type, dir,
 					ipa_ctx->stats.bridged_pkts);
 		}
@@ -444,7 +428,6 @@
 
 	INIT_LIST_HEAD(&sys->head_desc_list);
 	INIT_LIST_HEAD(&sys->free_desc_list);
-	spin_lock_init(&sys->spinlock);
 
 	memset(&ipa_ctx->ep[ipa_ep_idx], 0,
 	       sizeof(struct ipa_ep_context));
@@ -614,7 +597,6 @@
 
 	INIT_LIST_HEAD(&sys->head_desc_list);
 	INIT_LIST_HEAD(&sys->free_desc_list);
-	spin_lock_init(&sys->spinlock);
 
 	if (dir == IPA_BRIDGE_DIR_DL) {
 		sys->register_event.options = SPS_O_EOT;
@@ -663,32 +645,32 @@
 	int ret;
 	int i;
 
-	bridge[IPA_BRIDGE_TYPE_TETHERED].ul_wq = alloc_workqueue("ipa_ul_teth",
-			WQ_MEM_RECLAIM | WQ_CPU_INTENSIVE, 1);
+	bridge[IPA_BRIDGE_TYPE_TETHERED].ul_wq =
+		create_singlethread_workqueue("ipa_ul_teth");
 	if (!bridge[IPA_BRIDGE_TYPE_TETHERED].ul_wq) {
 		IPAERR("ipa ul teth wq alloc failed\n");
 		ret = -ENOMEM;
 		goto fail_ul_teth;
 	}
 
-	bridge[IPA_BRIDGE_TYPE_TETHERED].dl_wq = alloc_workqueue("ipa_dl_teth",
-			WQ_MEM_RECLAIM | WQ_CPU_INTENSIVE, 1);
+	bridge[IPA_BRIDGE_TYPE_TETHERED].dl_wq =
+		create_singlethread_workqueue("ipa_dl_teth");
 	if (!bridge[IPA_BRIDGE_TYPE_TETHERED].dl_wq) {
 		IPAERR("ipa dl teth wq alloc failed\n");
 		ret = -ENOMEM;
 		goto fail_dl_teth;
 	}
 
-	bridge[IPA_BRIDGE_TYPE_EMBEDDED].ul_wq = alloc_workqueue("ipa_ul_emb",
-					 WQ_MEM_RECLAIM | WQ_CPU_INTENSIVE, 1);
+	bridge[IPA_BRIDGE_TYPE_EMBEDDED].ul_wq =
+		create_singlethread_workqueue("ipa_ul_emb");
 	if (!bridge[IPA_BRIDGE_TYPE_EMBEDDED].ul_wq) {
 		IPAERR("ipa ul emb wq alloc failed\n");
 		ret = -ENOMEM;
 		goto fail_ul_emb;
 	}
 
-	bridge[IPA_BRIDGE_TYPE_EMBEDDED].dl_wq = alloc_workqueue("ipa_dl_emb",
-					 WQ_MEM_RECLAIM | WQ_CPU_INTENSIVE, 1);
+	bridge[IPA_BRIDGE_TYPE_EMBEDDED].dl_wq =
+		create_singlethread_workqueue("ipa_dl_emb");
 	if (!bridge[IPA_BRIDGE_TYPE_EMBEDDED].dl_wq) {
 		IPAERR("ipa dl emb wq alloc failed\n");
 		ret = -ENOMEM;
diff --git a/drivers/platform/msm/ipa/ipa_debugfs.c b/drivers/platform/msm/ipa/ipa_debugfs.c
index ec83653..1605ed2 100644
--- a/drivers/platform/msm/ipa/ipa_debugfs.c
+++ b/drivers/platform/msm/ipa/ipa_debugfs.c
@@ -45,6 +45,37 @@
 	__stringify(IPA_CLIENT_MAX),
 };
 
+const char *ipa_ic_name[] = {
+	__stringify_1(IPA_IP_CMD_INVALID),
+	__stringify_1(IPA_DECIPH_INIT),
+	__stringify_1(IPA_PPP_FRM_INIT),
+	__stringify_1(IPA_IP_V4_FILTER_INIT),
+	__stringify_1(IPA_IP_V6_FILTER_INIT),
+	__stringify_1(IPA_IP_V4_NAT_INIT),
+	__stringify_1(IPA_IP_V6_NAT_INIT),
+	__stringify_1(IPA_IP_V4_ROUTING_INIT),
+	__stringify_1(IPA_IP_V6_ROUTING_INIT),
+	__stringify_1(IPA_HDR_INIT_LOCAL),
+	__stringify_1(IPA_HDR_INIT_SYSTEM),
+	__stringify_1(IPA_DECIPH_SETUP),
+	__stringify_1(IPA_INSERT_NAT_RULE),
+	__stringify_1(IPA_DELETE_NAT_RULE),
+	__stringify_1(IPA_NAT_DMA),
+	__stringify_1(IPA_IP_PACKET_TAG),
+	__stringify_1(IPA_IP_PACKET_INIT),
+};
+
+const char *ipa_excp_name[] = {
+	__stringify_1(IPA_A5_MUX_HDR_EXCP_RSVD0),
+	__stringify_1(IPA_A5_MUX_HDR_EXCP_RSVD1),
+	__stringify_1(IPA_A5_MUX_HDR_EXCP_FLAG_IHL),
+	__stringify_1(IPA_A5_MUX_HDR_EXCP_FLAG_REPLICATED),
+	__stringify_1(IPA_A5_MUX_HDR_EXCP_FLAG_TAG),
+	__stringify_1(IPA_A5_MUX_HDR_EXCP_FLAG_SW_FLT),
+	__stringify_1(IPA_A5_MUX_HDR_EXCP_FLAG_NAT),
+	__stringify_1(IPA_A5_MUX_HDR_EXCP_FLAG_IP),
+};
+
 static struct dentry *dent;
 static struct dentry *dfile_gen_reg;
 static struct dentry *dfile_ep_reg;
@@ -489,33 +520,39 @@
 	nbytes = scnprintf(dbg_buff, IPA_MAX_MSG_LEN,
 			"sw_tx=%u\n"
 			"hw_tx=%u\n"
-			"rx=%u\n",
+			"rx=%u\n"
+			"rx_repl_repost=%u\n"
+			"x_intr_repost=%u\n"
+			"rx_q_len=%u\n",
 			ipa_ctx->stats.tx_sw_pkts,
 			ipa_ctx->stats.tx_hw_pkts,
-			ipa_ctx->stats.rx_pkts);
+			ipa_ctx->stats.rx_pkts,
+			ipa_ctx->stats.rx_repl_repost,
+			ipa_ctx->stats.x_intr_repost,
+			ipa_ctx->stats.rx_q_len);
 	cnt += nbytes;
 
 	for (i = 0; i < MAX_NUM_EXCP; i++) {
 		nbytes = scnprintf(dbg_buff + cnt, IPA_MAX_MSG_LEN - cnt,
-				"rx_excp[%u]=%u\n", i,
+				"rx_excp[%u:%35s]=%u\n", i, ipa_excp_name[i],
 				ipa_ctx->stats.rx_excp_pkts[i]);
 		cnt += nbytes;
 	}
 
 	for (i = 0; i < IPA_BRIDGE_TYPE_MAX; i++) {
 		nbytes = scnprintf(dbg_buff + cnt, IPA_MAX_MSG_LEN - cnt,
-				"bridged_pkt[%u][dl]=%u\n"
-				"bridged_pkt[%u][ul]=%u\n",
-				i,
+				"brg_pkt[%u:%s][dl]=%u\n"
+				"brg_pkt[%u:%s][ul]=%u\n",
+				i, (i == 0) ? "teth" : "embd",
 				ipa_ctx->stats.bridged_pkts[i][0],
-				i,
+				i, (i == 0) ? "teth" : "embd",
 				ipa_ctx->stats.bridged_pkts[i][1]);
 		cnt += nbytes;
 	}
 
 	for (i = 0; i < MAX_NUM_IMM_CMD; i++) {
 		nbytes = scnprintf(dbg_buff + cnt, IPA_MAX_MSG_LEN - cnt,
-				"IC[%u]=%u\n", i,
+				"IC[%2u:%22s]=%u\n", i, ipa_ic_name[i],
 				ipa_ctx->stats.imm_cmds[i]);
 		cnt += nbytes;
 	}
diff --git a/drivers/platform/msm/ipa/ipa_dp.c b/drivers/platform/msm/ipa/ipa_dp.c
index 52ed428..38690e9 100644
--- a/drivers/platform/msm/ipa/ipa_dp.c
+++ b/drivers/platform/msm/ipa/ipa_dp.c
@@ -10,6 +10,7 @@
  * GNU General Public License for more details.
  */
 
+#include <linux/delay.h>
 #include <linux/device.h>
 #include <linux/dmapool.h>
 #include <linux/list.h>
@@ -19,6 +20,17 @@
 #define list_next_entry(pos, member) \
 	list_entry(pos->member.next, typeof(*pos), member)
 #define IPA_LAST_DESC_CNT 0xFFFF
+#define POLLING_INACTIVITY 40
+#define POLLING_MIN_SLEEP 950
+#define POLLING_MAX_SLEEP 1050
+
+static void replenish_rx_work_func(struct work_struct *work);
+static struct delayed_work replenish_rx_work;
+static void switch_to_intr_work_func(struct work_struct *work);
+static struct delayed_work switch_to_intr_work;
+static void ipa_wq_handle_rx(struct work_struct *work);
+static DECLARE_WORK(rx_work, ipa_wq_handle_rx);
+
 /**
  * ipa_write_done() - this function will be (eventually) called when a Tx
  * operation is complete
@@ -40,7 +52,7 @@
 	unsigned long irq_flags;
 	struct ipa_mem_buffer mult = { 0 };
 	int i;
-	u16 cnt;
+	u32 cnt;
 
 	tx_pkt = container_of(work, struct ipa_tx_pkt_wrapper, work);
 	cnt = tx_pkt->cnt;
@@ -66,9 +78,8 @@
 		}
 		next_pkt = list_next_entry(tx_pkt, link);
 		list_del(&tx_pkt->link);
-		tx_pkt->sys->len--;
 		spin_unlock_irqrestore(&tx_pkt->sys->spinlock, irq_flags);
-		if (ipa_ctx->ipa_hw_type == IPA_HW_v1_0) {
+		if (unlikely(ipa_ctx->ipa_hw_type == IPA_HW_v1_0)) {
 			dma_pool_free(ipa_ctx->one_kb_no_straddle_pool,
 					tx_pkt->bounce,
 					tx_pkt->mem.phys_base);
@@ -114,10 +125,10 @@
 	u16 sps_flags = SPS_IOVEC_FLAG_EOT | SPS_IOVEC_FLAG_INT;
 	dma_addr_t dma_address;
 	u16 len;
-	u32 mem_flag = GFP_KERNEL;
+	u32 mem_flag = GFP_ATOMIC;
 
-	if (in_atomic)
-		mem_flag = GFP_ATOMIC;
+	if (unlikely(!in_atomic))
+		mem_flag = GFP_KERNEL;
 
 	tx_pkt = kmem_cache_zalloc(ipa_ctx->tx_pkt_wrapper_cache, mem_flag);
 	if (!tx_pkt) {
@@ -125,7 +136,7 @@
 		goto fail_mem_alloc;
 	}
 
-	if (ipa_ctx->ipa_hw_type == IPA_HW_v1_0) {
+	if (unlikely(ipa_ctx->ipa_hw_type == IPA_HW_v1_0)) {
 		WARN_ON(desc->len > 512);
 
 		/*
@@ -173,19 +184,15 @@
 	if (desc->type == IPA_IMM_CMD_DESC) {
 		sps_flags |= SPS_IOVEC_FLAG_IMME;
 		len = desc->opcode;
+		IPADBG("sending cmd=%d pyld_len=%d sps_flags=%x\n",
+				desc->opcode, desc->len, sps_flags);
+		IPA_DUMP_BUFF(desc->pyld, dma_address, desc->len);
 	} else {
 		len = desc->len;
 	}
 
-	if (desc->type == IPA_IMM_CMD_DESC) {
-		IPADBG("sending cmd=%d pyld_len=%d sps_flags=%x\n",
-				desc->opcode, desc->len, sps_flags);
-		IPA_DUMP_BUFF(desc->pyld, dma_address, desc->len);
-	}
-
 	spin_lock_irqsave(&sys->spinlock, irq_flags);
 	list_add_tail(&tx_pkt->link, &sys->head_desc_list);
-	sys->len++;
 	result = sps_transfer_one(sys->ep->ep_hdl, dma_address, len, tx_pkt,
 			sps_flags);
 	if (result) {
@@ -200,7 +207,7 @@
 fail_sps_send:
 	list_del(&tx_pkt->link);
 	spin_unlock_irqrestore(&sys->spinlock, irq_flags);
-	if (ipa_ctx->ipa_hw_type == IPA_HW_v1_0)
+	if (unlikely(ipa_ctx->ipa_hw_type == IPA_HW_v1_0))
 		dma_pool_free(ipa_ctx->one_kb_no_straddle_pool, tx_pkt->bounce,
 				dma_address);
 	else
@@ -233,7 +240,7 @@
  *
  * Return codes: 0: success, -EFAULT: failure
  */
-int ipa_send(struct ipa_sys_context *sys, u16 num_desc, struct ipa_desc *desc,
+int ipa_send(struct ipa_sys_context *sys, u32 num_desc, struct ipa_desc *desc,
 		bool in_atomic)
 {
 	struct ipa_tx_pkt_wrapper *tx_pkt;
@@ -247,17 +254,18 @@
 	int result;
 	int fail_dma_wrap = 0;
 	uint size = num_desc * sizeof(struct sps_iovec);
-	u32 mem_flag = GFP_KERNEL;
+	u32 mem_flag = GFP_ATOMIC;
 
-	if (likely(in_atomic))
-		mem_flag = GFP_ATOMIC;
+	if (unlikely(!in_atomic))
+		mem_flag = GFP_KERNEL;
 
 	transfer.iovec = dma_alloc_coherent(NULL, size, &dma_addr, 0);
 	transfer.iovec_phys = dma_addr;
 	transfer.iovec_count = num_desc;
+	spin_lock_irqsave(&sys->spinlock, irq_flags);
 	if (!transfer.iovec) {
 		IPAERR("fail to alloc DMA mem for sps xfr buff\n");
-		goto failure;
+		goto failure_coherent;
 	}
 
 	for (i = 0; i < num_desc; i++) {
@@ -274,24 +282,23 @@
 		 */
 		if (i == 0) {
 			transfer.user = tx_pkt;
-
 			tx_pkt->mult.phys_base = dma_addr;
 			tx_pkt->mult.base = transfer.iovec;
 			tx_pkt->mult.size = size;
 			tx_pkt->cnt = num_desc;
+			INIT_WORK(&tx_pkt->work, ipa_wq_write_done);
 		}
 
 		iovec = &transfer.iovec[i];
 		iovec->flags = 0;
 
 		INIT_LIST_HEAD(&tx_pkt->link);
-		INIT_WORK(&tx_pkt->work, ipa_wq_write_done);
 		tx_pkt->type = desc[i].type;
 
 		tx_pkt->mem.base = desc[i].pyld;
 		tx_pkt->mem.size = desc[i].len;
 
-		if (ipa_ctx->ipa_hw_type == IPA_HW_v1_0) {
+		if (unlikely(ipa_ctx->ipa_hw_type == IPA_HW_v1_0)) {
 			WARN_ON(tx_pkt->mem.size > 512);
 
 			/*
@@ -334,10 +341,7 @@
 		 * add this packet to system pipe context.
 		 */
 		iovec->addr = tx_pkt->mem.phys_base;
-		spin_lock_irqsave(&sys->spinlock, irq_flags);
 		list_add_tail(&tx_pkt->link, &sys->head_desc_list);
-		sys->len++;
-		spin_unlock_irqrestore(&sys->spinlock, irq_flags);
 
 		/*
 		 * Special treatment for immediate commands, where the structure
@@ -364,16 +368,15 @@
 		goto failure;
 	}
 
+	spin_unlock_irqrestore(&sys->spinlock, irq_flags);
 	return 0;
 
 failure:
 	tx_pkt = transfer.user;
 	for (j = 0; j < i; j++) {
-		spin_lock_irqsave(&sys->spinlock, irq_flags);
 		next_pkt = list_next_entry(tx_pkt, link);
 		list_del(&tx_pkt->link);
-		spin_unlock_irqrestore(&sys->spinlock, irq_flags);
-		if (ipa_ctx->ipa_hw_type == IPA_HW_v1_0)
+		if (unlikely(ipa_ctx->ipa_hw_type == IPA_HW_v1_0))
 			dma_pool_free(ipa_ctx->one_kb_no_straddle_pool,
 					tx_pkt->bounce,
 					tx_pkt->mem.phys_base);
@@ -391,7 +394,8 @@
 	if (transfer.iovec_phys)
 		dma_free_coherent(NULL, size, transfer.iovec,
 				  transfer.iovec_phys);
-
+failure_coherent:
+	spin_unlock_irqrestore(&sys->spinlock, irq_flags);
 	return -EFAULT;
 }
 
@@ -512,15 +516,14 @@
  *  - Call the endpoints notify function, passing the skb in the parameters
  *  - Replenish the rx cache
  */
-int ipa_handle_rx_core(bool process_all)
+int ipa_handle_rx_core(bool process_all, bool in_poll_state)
 {
 	struct ipa_a5_mux_hdr *mux_hdr;
 	struct ipa_rx_pkt_wrapper *rx_pkt;
 	struct sk_buff *rx_skb;
 	struct sps_iovec iov;
-	unsigned long irq_flags;
-	u16 pull_len;
-	u16 padding;
+	unsigned int pull_len;
+	unsigned int padding;
 	int ret;
 	struct ipa_sys_context *sys = &ipa_ctx->sys[IPA_A5_LAN_WAN_IN];
 	struct ipa_ep_context *ep;
@@ -528,35 +531,35 @@
 	struct completion *compl;
 	struct ipa_tree_node *node;
 
-	do {
+	while ((in_poll_state ? atomic_read(&ipa_ctx->curr_polling_state) :
+				!atomic_read(&ipa_ctx->curr_polling_state))) {
+		if (cnt && !process_all)
+			break;
+
 		ret = sps_get_iovec(sys->ep->ep_hdl, &iov);
 		if (ret) {
 			IPAERR("sps_get_iovec failed %d\n", ret);
 			break;
 		}
 
-		/* Break the loop when there are no more packets to receive */
 		if (iov.addr == 0)
 			break;
 
-		spin_lock_irqsave(&sys->spinlock, irq_flags);
-		if (list_empty(&sys->head_desc_list))
-			WARN_ON(1);
+		if (unlikely(list_empty(&sys->head_desc_list)))
+			continue;
+
 		rx_pkt = list_first_entry(&sys->head_desc_list,
 					  struct ipa_rx_pkt_wrapper, link);
-		if (!rx_pkt)
-			WARN_ON(1);
+
 		rx_pkt->len = iov.size;
 		sys->len--;
 		list_del(&rx_pkt->link);
-		spin_unlock_irqrestore(&sys->spinlock, irq_flags);
 
 		IPADBG("--curr_cnt=%d\n", sys->len);
 
 		rx_skb = rx_pkt->skb;
 		dma_unmap_single(NULL, rx_pkt->dma_address, IPA_RX_SKB_SIZE,
 				 DMA_FROM_DEVICE);
-		kmem_cache_free(ipa_ctx->rx_pkt_wrapper_cache, rx_pkt);
 
 		/*
 		 * make it look like a real skb, "data" was already set at
@@ -565,6 +568,7 @@
 		rx_skb->tail = rx_skb->data + rx_pkt->len;
 		rx_skb->len = rx_pkt->len;
 		rx_skb->truesize = rx_pkt->len + sizeof(struct sk_buff);
+		kmem_cache_free(ipa_ctx->rx_pkt_wrapper_cache, rx_pkt);
 
 		mux_hdr = (struct ipa_a5_mux_hdr *)rx_skb->data;
 
@@ -601,20 +605,20 @@
 				}
 				mutex_unlock(&ipa_ctx->lock);
 			}
-			dev_kfree_skb_any(rx_skb);
+			dev_kfree_skb(rx_skb);
 			ipa_replenish_rx_cache();
 			++cnt;
 			continue;
 		}
 
-		if (mux_hdr->src_pipe_index >= IPA_NUM_PIPES ||
+		if (unlikely(mux_hdr->src_pipe_index >= IPA_NUM_PIPES ||
 			!ipa_ctx->ep[mux_hdr->src_pipe_index].valid ||
-			!ipa_ctx->ep[mux_hdr->src_pipe_index].client_notify) {
+			!ipa_ctx->ep[mux_hdr->src_pipe_index].client_notify)) {
 			IPAERR("drop pipe=%d ep_valid=%d client_notify=%p\n",
 			  mux_hdr->src_pipe_index,
 			  ipa_ctx->ep[mux_hdr->src_pipe_index].valid,
 			  ipa_ctx->ep[mux_hdr->src_pipe_index].client_notify);
-			dev_kfree_skb_any(rx_skb);
+			dev_kfree_skb(rx_skb);
 			ipa_replenish_rx_cache();
 			++cnt;
 			continue;
@@ -634,11 +638,11 @@
 
 		IPADBG("pulling %d bytes from skb\n", pull_len);
 		skb_pull(rx_skb, pull_len);
+		ipa_replenish_rx_cache();
 		ep->client_notify(ep->priv, IPA_RECEIVE,
 				(unsigned long)(rx_skb));
-		ipa_replenish_rx_cache();
 		cnt++;
-	} while (process_all);
+	}
 
 	return cnt;
 }
@@ -652,9 +656,9 @@
 	struct ipa_sys_context *sys;
 
 	IPADBG("Enter");
-	if (!ipa_ctx->curr_polling_state) {
+	if (!atomic_read(&ipa_ctx->curr_polling_state)) {
 		IPAERR("already in intr mode\n");
-		return;
+		goto fail;
 	}
 
 	sys = &ipa_ctx->sys[IPA_A5_LAN_WAN_IN];
@@ -662,49 +666,28 @@
 	ret = sps_get_config(sys->ep->ep_hdl, &sys->ep->connect);
 	if (ret) {
 		IPAERR("sps_get_config() failed %d\n", ret);
-		return;
+		goto fail;
 	}
 	sys->event.options = SPS_O_EOT;
 	ret = sps_register_event(sys->ep->ep_hdl, &sys->event);
 	if (ret) {
 		IPAERR("sps_register_event() failed %d\n", ret);
-		return;
+		goto fail;
 	}
 	sys->ep->connect.options =
 		SPS_O_AUTO_ENABLE | SPS_O_ACK_TRANSFERS | SPS_O_EOT;
 	ret = sps_set_config(sys->ep->ep_hdl, &sys->ep->connect);
 	if (ret) {
 		IPAERR("sps_set_config() failed %d\n", ret);
-		return;
+		goto fail;
 	}
-	ipa_handle_rx_core(true);
-	ipa_ctx->curr_polling_state = 0;
-}
+	atomic_set(&ipa_ctx->curr_polling_state, 0);
+	ipa_handle_rx_core(true, false);
+	return;
 
-/**
- * ipa_rx_switch_to_poll_mode() - Operate the Rx data path in polling mode
- */
-static void ipa_rx_switch_to_poll_mode(void)
-{
-	int ret;
-	struct ipa_ep_context *ep;
-
-	IPADBG("Enter");
-	ep = ipa_ctx->sys[IPA_A5_LAN_WAN_IN].ep;
-
-	ret = sps_get_config(ep->ep_hdl, &ep->connect);
-	if (ret) {
-		IPAERR("sps_get_config() failed %d\n", ret);
-		return;
-	}
-	ep->connect.options =
-		SPS_O_AUTO_ENABLE | SPS_O_ACK_TRANSFERS | SPS_O_POLL;
-	ret = sps_set_config(ep->ep_hdl, &ep->connect);
-	if (ret) {
-		IPAERR("sps_set_config() failed %d\n", ret);
-		return;
-	}
-	ipa_ctx->curr_polling_state = 1;
+fail:
+	IPA_STATS_INC_CNT(ipa_ctx->stats.x_intr_repost);
+	schedule_delayed_work(&switch_to_intr_work, msecs_to_jiffies(1));
 }
 
 /**
@@ -722,16 +705,30 @@
  */
 static void ipa_sps_irq_rx_notify(struct sps_event_notify *notify)
 {
-	struct ipa_rx_pkt_wrapper *rx_pkt;
+	struct ipa_ep_context *ep;
+	int ret;
 
 	IPADBG("event %d notified\n", notify->event_id);
 
 	switch (notify->event_id) {
 	case SPS_EVENT_EOT:
-		if (!ipa_ctx->curr_polling_state) {
-			ipa_rx_switch_to_poll_mode();
-			rx_pkt = notify->data.transfer.user;
-			queue_work(ipa_ctx->rx_wq, &rx_pkt->work);
+		if (!atomic_read(&ipa_ctx->curr_polling_state)) {
+			ep = ipa_ctx->sys[IPA_A5_LAN_WAN_IN].ep;
+
+			ret = sps_get_config(ep->ep_hdl, &ep->connect);
+			if (ret) {
+				IPAERR("sps_get_config() failed %d\n", ret);
+				break;
+			}
+			ep->connect.options = SPS_O_AUTO_ENABLE |
+				SPS_O_ACK_TRANSFERS | SPS_O_POLL;
+			ret = sps_set_config(ep->ep_hdl, &ep->connect);
+			if (ret) {
+				IPAERR("sps_set_config() failed %d\n", ret);
+				break;
+			}
+			atomic_set(&ipa_ctx->curr_polling_state, 1);
+			queue_work(ipa_ctx->rx_wq, &rx_work);
 		}
 		break;
 	default:
@@ -861,6 +858,9 @@
 		/* fall through */
 	case 3:
 		sys_idx = ipa_ep_idx;
+		INIT_DELAYED_WORK(&replenish_rx_work, replenish_rx_work_func);
+		INIT_DELAYED_WORK(&switch_to_intr_work,
+				switch_to_intr_work_func);
 		break;
 	case WLAN_AMPDU_TX_EP:
 		sys_idx = IPA_A5_WLAN_AMPDU_OUT;
@@ -954,7 +954,7 @@
 		ipa_ctx->ep[ep_idx].client_notify(ipa_ctx->ep[ep_idx].priv,
 				IPA_WRITE_DONE, (unsigned long)skb);
 	else
-		dev_kfree_skb_any(skb);
+		dev_kfree_skb(skb);
 }
 
 static void ipa_tx_cmd_comp(void *user1, void *user2)
@@ -1066,6 +1066,24 @@
 }
 EXPORT_SYMBOL(ipa_tx_dp);
 
+static void ipa_handle_rx(void)
+{
+	int inactive_cycles = 0;
+	int cnt;
+
+	do {
+		cnt = ipa_handle_rx_core(true, true);
+		if (cnt == 0) {
+			inactive_cycles++;
+			usleep_range(POLLING_MIN_SLEEP, POLLING_MAX_SLEEP);
+		} else {
+			inactive_cycles = 0;
+		}
+	} while (inactive_cycles <= POLLING_INACTIVITY);
+
+	ipa_rx_switch_to_intr_mode();
+}
+
 /**
  * ipa_handle_rx() - handle packet reception. This function is executed in the
  * context of a work queue.
@@ -1074,10 +1092,9 @@
  * ipa_handle_rx_core() is run in polling mode. After all packets has been
  * received, the driver switches back to interrupt mode.
  */
-void ipa_wq_handle_rx(struct work_struct *work)
+static void ipa_wq_handle_rx(struct work_struct *work)
 {
-	ipa_handle_rx_core(true);
-	ipa_rx_switch_to_intr_mode();
+	ipa_handle_rx();
 }
 
 /**
@@ -1099,26 +1116,23 @@
 	void *ptr;
 	struct ipa_rx_pkt_wrapper *rx_pkt;
 	int ret;
-	int rx_len_cached;
-	unsigned long irq_flags;
+	int rx_len_cached = 0;
 	struct ipa_sys_context *sys = &ipa_ctx->sys[IPA_A5_LAN_WAN_IN];
+	gfp_t flag = GFP_NOWAIT | __GFP_NOWARN;
 
-	spin_lock_irqsave(&sys->spinlock, irq_flags);
 	rx_len_cached = sys->len;
-	spin_unlock_irqrestore(&sys->spinlock, irq_flags);
 
 	while (rx_len_cached < IPA_RX_POOL_CEIL) {
 		rx_pkt = kmem_cache_zalloc(ipa_ctx->rx_pkt_wrapper_cache,
-					   GFP_KERNEL);
+					   flag);
 		if (!rx_pkt) {
 			IPAERR("failed to alloc rx wrapper\n");
-			return;
+			goto fail_kmem_cache_alloc;
 		}
 
 		INIT_LIST_HEAD(&rx_pkt->link);
-		INIT_WORK(&rx_pkt->work, ipa_wq_handle_rx);
 
-		rx_pkt->skb = __dev_alloc_skb(IPA_RX_SKB_SIZE, GFP_KERNEL);
+		rx_pkt->skb = __dev_alloc_skb(IPA_RX_SKB_SIZE, flag);
 		if (rx_pkt->skb == NULL) {
 			IPAERR("failed to alloc skb\n");
 			goto fail_skb_alloc;
@@ -1133,10 +1147,8 @@
 			goto fail_dma_mapping;
 		}
 
-		spin_lock_irqsave(&sys->spinlock, irq_flags);
 		list_add_tail(&rx_pkt->link, &sys->head_desc_list);
 		rx_len_cached = ++sys->len;
-		spin_unlock_irqrestore(&sys->spinlock, irq_flags);
 
 		ret = sps_transfer_one(sys->ep->ep_hdl, rx_pkt->dma_address,
 				       IPA_RX_SKB_SIZE, rx_pkt,
@@ -1146,27 +1158,41 @@
 			IPAERR("sps_transfer_one failed %d\n", ret);
 			goto fail_sps_transfer;
 		}
-
-		IPADBG("++curr_cnt=%d\n", sys->len);
 	}
 
+	ipa_ctx->stats.rx_q_len = sys->len;
+
 	return;
 
 fail_sps_transfer:
-	spin_lock_irqsave(&sys->spinlock, irq_flags);
 	list_del(&rx_pkt->link);
-	--sys->len;
-	spin_unlock_irqrestore(&sys->spinlock, irq_flags);
+	rx_len_cached = --sys->len;
 	dma_unmap_single(NULL, rx_pkt->dma_address, IPA_RX_SKB_SIZE,
 			 DMA_FROM_DEVICE);
 fail_dma_mapping:
-	dev_kfree_skb_any(rx_pkt->skb);
+	dev_kfree_skb(rx_pkt->skb);
 fail_skb_alloc:
 	kmem_cache_free(ipa_ctx->rx_pkt_wrapper_cache, rx_pkt);
-
+fail_kmem_cache_alloc:
+	if (rx_len_cached == 0) {
+		IPA_STATS_INC_CNT(ipa_ctx->stats.rx_repl_repost);
+		schedule_delayed_work(&replenish_rx_work,
+				msecs_to_jiffies(100));
+	}
+	ipa_ctx->stats.rx_q_len = sys->len;
 	return;
 }
 
+static void replenish_rx_work_func(struct work_struct *work)
+{
+	ipa_replenish_rx_cache();
+}
+
+static void switch_to_intr_work_func(struct work_struct *work)
+{
+	ipa_handle_rx();
+}
+
 /**
  * ipa_cleanup_rx() - release RX queue resources
  *
@@ -1175,18 +1201,15 @@
 {
 	struct ipa_rx_pkt_wrapper *rx_pkt;
 	struct ipa_rx_pkt_wrapper *r;
-	unsigned long irq_flags;
 	struct ipa_sys_context *sys = &ipa_ctx->sys[IPA_A5_LAN_WAN_IN];
 
-	spin_lock_irqsave(&sys->spinlock, irq_flags);
 	list_for_each_entry_safe(rx_pkt, r,
 				 &sys->head_desc_list, link) {
 		list_del(&rx_pkt->link);
 		dma_unmap_single(NULL, rx_pkt->dma_address, IPA_RX_SKB_SIZE,
 				 DMA_FROM_DEVICE);
-		dev_kfree_skb_any(rx_pkt->skb);
+		dev_kfree_skb(rx_pkt->skb);
 		kmem_cache_free(ipa_ctx->rx_pkt_wrapper_cache, rx_pkt);
 	}
-	spin_unlock_irqrestore(&sys->spinlock, irq_flags);
 }
 
diff --git a/drivers/platform/msm/ipa/ipa_i.h b/drivers/platform/msm/ipa/ipa_i.h
index 1b5b339..cb8c0f5 100644
--- a/drivers/platform/msm/ipa/ipa_i.h
+++ b/drivers/platform/msm/ipa/ipa_i.h
@@ -110,7 +110,7 @@
 
 #define IPA_EVENT_THRESHOLD 0x10
 
-#define IPA_RX_POOL_CEIL 24
+#define IPA_RX_POOL_CEIL 32
 #define IPA_RX_SKB_SIZE 2048
 
 #define IPA_DFLT_HDR_NAME "ipa_excp_hdr"
@@ -422,7 +422,7 @@
 	void *user2;
 	struct ipa_sys_context *sys;
 	struct ipa_mem_buffer mult;
-	u16 cnt;
+	u32 cnt;
 	void *bounce;
 };
 
@@ -453,16 +453,14 @@
  * struct ipa_rx_pkt_wrapper - IPA Rx packet wrapper
  * @skb: skb
  * @dma_address: DMA address of this Rx packet
- * @work: work struct for current Rx packet
  * @link: linked to the Rx packets on that pipe
  * @len: how many bytes are copied into skb's flat buffer
  */
 struct ipa_rx_pkt_wrapper {
 	struct sk_buff *skb;
 	dma_addr_t dma_address;
-	struct work_struct work;
 	struct list_head link;
-	u16 len;
+	u32 len;
 };
 
 /**
@@ -527,6 +525,9 @@
 	u32 rx_pkts;
 	u32 rx_excp_pkts[MAX_NUM_EXCP];
 	u32 bridged_pkts[IPA_BRIDGE_TYPE_MAX][IPA_BRIDGE_DIR_MAX];
+	u32 rx_repl_repost;
+	u32 x_intr_repost;
+	u32 rx_q_len;
 };
 
 /**
@@ -629,7 +630,7 @@
 	uint aggregation_type;
 	uint aggregation_byte_limit;
 	uint aggregation_time_limit;
-	uint curr_polling_state;
+	atomic_t curr_polling_state;
 	struct delayed_work poll_work;
 	bool hdr_tbl_lcl;
 	struct ipa_mem_buffer hdr_mem;
@@ -719,8 +720,11 @@
 	u32 ipa_mem_size;
 	u32 bam_mem_base;
 	u32 bam_mem_size;
+	u32 a2_bam_mem_base;
+	u32 a2_bam_mem_size;
 	u32 ipa_irq;
 	u32 bam_irq;
+	u32 a2_bam_irq;
 	u32 ipa_pipe_mem_start_ofst;
 	u32 ipa_pipe_mem_size;
 	enum ipa_hw_type ipa_hw_type;
@@ -733,11 +737,13 @@
 
 int ipa_get_a2_mux_pipe_info(enum a2_mux_pipe_direction pipe_dir,
 				struct a2_mux_pipe_connection *pipe_connect);
+int ipa_get_a2_mux_bam_info(u32 *a2_bam_mem_base, u32 *a2_bam_mem_size,
+			    u32 *a2_bam_irq);
 void rmnet_bridge_get_client_handles(u32 *producer_handle,
 		u32 *consumer_handle);
 int ipa_send_one(struct ipa_sys_context *sys, struct ipa_desc *desc,
 		bool in_atomic);
-int ipa_send(struct ipa_sys_context *sys, u16 num_desc, struct ipa_desc *desc,
+int ipa_send(struct ipa_sys_context *sys, u32 num_desc, struct ipa_desc *desc,
 		bool in_atomic);
 int ipa_get_ep_mapping(enum ipa_operating_mode mode,
 		       enum ipa_client_type client);
@@ -778,8 +784,7 @@
 void ipa_cleanup_rx(void);
 int ipa_cfg_filter(u32 disable);
 void ipa_wq_write_done(struct work_struct *work);
-void ipa_wq_handle_rx(struct work_struct *work);
-int ipa_handle_rx_core(bool process_all);
+int ipa_handle_rx_core(bool process_all, bool in_poll_state);
 int ipa_pipe_mem_init(u32 start_ofst, u32 size);
 int ipa_pipe_mem_alloc(u32 *ofst, u32 size);
 int ipa_pipe_mem_free(u32 ofst, u32 size);
@@ -815,4 +820,11 @@
 int ipa_query_intf_tx_props(struct ipa_ioc_query_intf_tx_props *tx);
 int ipa_query_intf_rx_props(struct ipa_ioc_query_intf_rx_props *rx);
 
+int a2_mux_init(void);
+int a2_mux_exit(void);
+
+void wwan_cleanup(void);
+
+int teth_bridge_driver_init(void);
+
 #endif /* _IPA_I_H_ */
diff --git a/drivers/platform/msm/ipa/ipa_rm.c b/drivers/platform/msm/ipa/ipa_rm.c
new file mode 100644
index 0000000..99b19cc
--- /dev/null
+++ b/drivers/platform/msm/ipa/ipa_rm.c
@@ -0,0 +1,374 @@
+/* Copyright (c) 2013, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/slab.h>
+#include <linux/workqueue.h>
+#include <mach/ipa.h>
+#include "ipa_i.h"
+#include "ipa_rm_dependency_graph.h"
+#include "ipa_rm_i.h"
+#include "ipa_rm_resource.h"
+
+struct ipa_rm_context_type {
+	struct ipa_rm_dep_graph *dep_graph;
+	struct workqueue_struct *ipa_rm_wq;
+};
+static struct ipa_rm_context_type *ipa_rm_ctx;
+
+/**
+ * ipa_rm_create_resource() - create resource
+ * @create_params: [in] parameters needed
+ *                  for resource initialization
+ *
+ * Returns: 0 on success, negative on failure
+ *
+ * This function is called by IPA RM client to initialize client's resources.
+ * This API should be called before any other IPA RM API
+ * on given resource name.
+ *
+ */
+int ipa_rm_create_resource(struct ipa_rm_create_params *create_params)
+{
+	struct ipa_rm_resource *resource;
+	int result;
+
+	if (!create_params) {
+		result = -EINVAL;
+		goto bail;
+	}
+	if (ipa_rm_dep_graph_get_resource(ipa_rm_ctx->dep_graph,
+					  create_params->name,
+					  &resource) == 0) {
+		result = -EPERM;
+		goto bail;
+	}
+	result = ipa_rm_resource_create(create_params,
+			&resource);
+	if (result)
+		goto bail;
+	result = ipa_rm_dep_graph_add(ipa_rm_ctx->dep_graph, resource);
+	if (result)
+		ipa_rm_resource_delete(resource);
+bail:
+	return result;
+}
+EXPORT_SYMBOL(ipa_rm_create_resource);
+
+/**
+ * ipa_rm_add_dependency() - create dependency
+ *					between 2 resources
+ * @resource_name: name of dependent resource
+ * @depends_on_name: name of its dependency
+ *
+ * Returns: 0 on success, negative on failure
+ *
+ * Side effects: an IPA_RM_RESOURCE_GRANTED event may be generated
+ * if a client is registered with IPA RM
+ */
+int ipa_rm_add_dependency(enum ipa_rm_resource_name resource_name,
+			enum ipa_rm_resource_name depends_on_name)
+{
+	return ipa_rm_dep_graph_add_dependency(
+			ipa_rm_ctx->dep_graph,
+			resource_name,
+			depends_on_name);
+}
+EXPORT_SYMBOL(ipa_rm_add_dependency);
+
+/**
+ * ipa_rm_delete_dependency() - delete dependency
+ *					between 2 resources
+ * @resource_name: name of dependent resource
+ * @depends_on_name: name of its dependency
+ *
+ * Returns: 0 on success, negative on failure
+ *
+ * Side effects: an IPA_RM_RESOURCE_GRANTED event may be generated
+ * if a client is registered with IPA RM
+ */
+int ipa_rm_delete_dependency(enum ipa_rm_resource_name resource_name,
+			enum ipa_rm_resource_name depends_on_name)
+{
+	return ipa_rm_dep_graph_delete_dependency(
+			ipa_rm_ctx->dep_graph,
+			resource_name,
+			depends_on_name);
+}
+EXPORT_SYMBOL(ipa_rm_delete_dependency);
+
+/**
+ * ipa_rm_request_resource() - request resource
+ * @resource_name: [in] name of the requested resource
+ *
+ * Returns: 0 on success, negative on failure
+ *
+ * All registered callbacks are called with IPA_RM_RESOURCE_GRANTED
+ * on successful completion of this operation.
+ */
+int ipa_rm_request_resource(enum ipa_rm_resource_name resource_name)
+{
+	struct ipa_rm_resource *resource;
+	int result;
+	IPADBG("IPA RM ::ipa_rm_request_resource ENTER\n");
+
+	if (!IPA_RM_RESORCE_IS_PROD(resource_name)) {
+		result = -EINVAL;
+		goto bail;
+	}
+	if (ipa_rm_dep_graph_get_resource(ipa_rm_ctx->dep_graph,
+			resource_name,
+			&resource) != 0) {
+		result = -EPERM;
+		goto bail;
+	}
+	result = ipa_rm_resource_producer_request(
+			(struct ipa_rm_resource_prod *)resource);
+
+bail:
+	IPADBG("IPA RM ::ipa_rm_request_resource EXIT [%d]\n", result);
+
+	return result;
+}
+EXPORT_SYMBOL(ipa_rm_request_resource);
+
+/**
+ * ipa_rm_release_resource() - release resource
+ * @resource_name: [in] name of the requested resource
+ *
+ * Returns: 0 on success, negative on failure
+ *
+ * All registered callbacks are called with IPA_RM_RESOURCE_RELEASED
+ * on successful completion of this operation.
+ */
+int ipa_rm_release_resource(enum ipa_rm_resource_name resource_name)
+{
+	struct ipa_rm_resource *resource;
+	int result;
+	IPADBG("IPA RM ::ipa_rm_release_resource ENTER\n");
+
+	if (!IPA_RM_RESORCE_IS_PROD(resource_name)) {
+		result = -EINVAL;
+		goto bail;
+	}
+	if (ipa_rm_dep_graph_get_resource(ipa_rm_ctx->dep_graph,
+					  resource_name,
+					  &resource) != 0) {
+		result = -EPERM;
+		goto bail;
+	}
+	result = ipa_rm_resource_producer_release(
+		    (struct ipa_rm_resource_prod *)resource);
+
+bail:
+	IPADBG("IPA RM ::ipa_rm_release_resource EXIT [%d]\n", result);
+	return result;
+}
+EXPORT_SYMBOL(ipa_rm_release_resource);
+
+/**
+ * ipa_rm_register() - register for event
+ * @resource_name: resource name
+ * @reg_params: [in] registration parameters
+ *
+ * Returns: 0 on success, negative on failure
+ *
+ * Registration parameters provided here should be the same
+ * as provided later in  ipa_rm_deregister() call.
+ */
+int ipa_rm_register(enum ipa_rm_resource_name resource_name,
+			struct ipa_rm_register_params *reg_params)
+{
+	int result;
+	struct ipa_rm_resource *resource;
+	if (!IPA_RM_RESORCE_IS_PROD(resource_name)) {
+		result = -EINVAL;
+		goto bail;
+	}
+	if (ipa_rm_dep_graph_get_resource(ipa_rm_ctx->dep_graph,
+				resource_name,
+				&resource) != 0) {
+		result = -EPERM;
+		goto bail;
+	}
+	result = ipa_rm_resource_producer_register(
+			(struct ipa_rm_resource_prod *)resource,
+			reg_params);
+bail:
+	return result;
+}
+EXPORT_SYMBOL(ipa_rm_register);
+
+/**
+ * ipa_rm_deregister() - cancel the registration
+ * @resource_name: resource name
+ * @reg_params: [in] registration parameters
+ *
+ * Returns: 0 on success, negative on failure
+ *
+ * Registration parameters provided here should be the same
+ * as provided in  ipa_rm_register() call.
+ */
+int ipa_rm_deregister(enum ipa_rm_resource_name resource_name,
+			struct ipa_rm_register_params *reg_params)
+{
+	int result;
+	struct ipa_rm_resource *resource;
+	if (!IPA_RM_RESORCE_IS_PROD(resource_name)) {
+		result = -EINVAL;
+		goto bail;
+	}
+	if (ipa_rm_dep_graph_get_resource(ipa_rm_ctx->dep_graph,
+			resource_name,
+			&resource) != 0) {
+		result = -EPERM;
+		goto bail;
+	}
+	result = ipa_rm_resource_producer_deregister(
+			(struct ipa_rm_resource_prod *)resource,
+			reg_params);
+bail:
+	return result;
+}
+EXPORT_SYMBOL(ipa_rm_deregister);
+
+/**
+ * ipa_rm_notify_completion() -
+ *	consumer driver notification for
+ *	request_resource / release_resource operations
+ *	completion
+ * @event: notified event
+ * @resource_name: resource name
+ *
+ * Returns: 0 on success, negative on failure
+ */
+int ipa_rm_notify_completion(enum ipa_rm_event event,
+		enum ipa_rm_resource_name resource_name)
+{
+	int result;
+	if (!IPA_RM_RESORCE_IS_CONS(resource_name)) {
+		result = -EINVAL;
+		goto bail;
+	}
+	ipa_rm_wq_send_cmd(IPA_RM_WQ_RESOURCE_CB,
+			resource_name,
+			event);
+	result = 0;
+bail:
+	return result;
+}
+EXPORT_SYMBOL(ipa_rm_notify_completion);
+
+static void ipa_rm_wq_handler(struct work_struct *work)
+{
+	struct ipa_rm_resource *resource;
+	struct ipa_rm_wq_work_type *ipa_rm_work =
+			container_of(work,
+					struct ipa_rm_wq_work_type,
+					work);
+	switch (ipa_rm_work->wq_cmd) {
+	case IPA_RM_WQ_NOTIFY_PROD:
+		if (!IPA_RM_RESORCE_IS_PROD(ipa_rm_work->resource_name))
+			return;
+		if (ipa_rm_dep_graph_get_resource(ipa_rm_ctx->dep_graph,
+						ipa_rm_work->resource_name,
+						&resource) != 0)
+			return;
+		ipa_rm_resource_producer_notify_clients(
+				(struct ipa_rm_resource_prod *)resource,
+				ipa_rm_work->event);
+
+		break;
+	case IPA_RM_WQ_NOTIFY_CONS:
+		break;
+	case IPA_RM_WQ_RESOURCE_CB:
+		if (ipa_rm_dep_graph_get_resource(ipa_rm_ctx->dep_graph,
+						ipa_rm_work->resource_name,
+						&resource) != 0)
+			return;
+		ipa_rm_resource_consumer_handle_cb(
+				(struct ipa_rm_resource_cons *)resource,
+				ipa_rm_work->event);
+		break;
+	default:
+		break;
+	}
+
+	kfree((void *) work);
+}
+
+/**
+ * ipa_rm_wq_send_cmd() - send a command for deferred work
+ * @wq_cmd: command that should be executed
+ * @resource_name: resource on which command should be executed
+ * @event: event to deliver when the command is executed
+ *
+ * Returns: 0 on success, negative otherwise
+ */
+int ipa_rm_wq_send_cmd(enum ipa_rm_wq_cmd wq_cmd,
+		enum ipa_rm_resource_name resource_name,
+		enum ipa_rm_event event)
+{
+	int result = -ENOMEM;
+	struct ipa_rm_wq_work_type *work = kzalloc(sizeof(*work), GFP_KERNEL);
+	if (work) {
+		INIT_WORK((struct work_struct *)work, ipa_rm_wq_handler);
+		work->wq_cmd = wq_cmd;
+		work->resource_name = resource_name;
+		work->event = event;
+		result = queue_work(ipa_rm_ctx->ipa_rm_wq,
+				(struct work_struct *)work);
+	}
+	return result;
+}
+
+/**
+ * ipa_rm_initialize() - initialize IPA RM component
+ *
+ * Returns: 0 on success, negative otherwise
+ */
+int ipa_rm_initialize(void)
+{
+	int result;
+
+	ipa_rm_ctx = kzalloc(sizeof(*ipa_rm_ctx), GFP_KERNEL);
+	if (!ipa_rm_ctx) {
+		result = -ENOMEM;
+		goto bail;
+	}
+	ipa_rm_ctx->ipa_rm_wq = create_singlethread_workqueue("ipa_rm_wq");
+	if (!ipa_rm_ctx->ipa_rm_wq) {
+		result = -ENOMEM;
+		goto create_wq_fail;
+	}
+	result = ipa_rm_dep_graph_create(&(ipa_rm_ctx->dep_graph));
+	if (result)
+		goto graph_alloc_fail;
+	IPADBG("IPA RM ipa_rm_initialize SUCCESS\n");
+	return 0;
+
+graph_alloc_fail:
+	destroy_workqueue(ipa_rm_ctx->ipa_rm_wq);
+create_wq_fail:
+	kfree(ipa_rm_ctx);
+bail:
+	return result;
+}
+
+/**
+ * ipa_rm_exit() - free all IPA RM resources
+ */
+void ipa_rm_exit(void)
+{
+	ipa_rm_dep_graph_delete(ipa_rm_ctx->dep_graph);
+	destroy_workqueue(ipa_rm_ctx->ipa_rm_wq);
+	kfree(ipa_rm_ctx);
+	ipa_rm_ctx = NULL;
+}
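
For clients, the API above amounts to a create -> add-dependency -> request/release lifecycle. A minimal client-side sketch follows (illustrative only, not part of this patch); the two resource-name constants are placeholders for real entries of enum ipa_rm_resource_name in <mach/ipa.h>, and the create-params layout is assumed to expose the resource name as shown:

	#include <linux/string.h>
	#include <mach/ipa.h>	/* ipa_rm_* API and enum ipa_rm_resource_name */

	/*
	 * Illustrative only. IPA_RM_RESOURCE_FOO_PROD and
	 * IPA_RM_RESOURCE_BAR_CONS are placeholders for one producer and
	 * one consumer resource name.
	 */
	static int example_rm_client(void)
	{
		struct ipa_rm_create_params create_params;
		int rc;

		memset(&create_params, 0, sizeof(create_params));
		create_params.name = IPA_RM_RESOURCE_FOO_PROD;	/* placeholder */

		rc = ipa_rm_create_resource(&create_params);
		if (rc)
			return rc;

		/* producer FOO is granted only after consumer BAR is granted */
		rc = ipa_rm_add_dependency(IPA_RM_RESOURCE_FOO_PROD,
					   IPA_RM_RESOURCE_BAR_CONS);
		if (rc)
			return rc;

		/*
		 * Registered callbacks receive IPA_RM_RESOURCE_GRANTED when
		 * the grant completes; any non-zero return is treated as a
		 * failure in this simplified sketch.
		 */
		rc = ipa_rm_request_resource(IPA_RM_RESOURCE_FOO_PROD);
		if (rc)
			return rc;

		/* ... producer path in use ... */

		return ipa_rm_release_resource(IPA_RM_RESOURCE_FOO_PROD);
	}
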
diff --git a/drivers/platform/msm/ipa/ipa_rm_dependency_graph.c b/drivers/platform/msm/ipa/ipa_rm_dependency_graph.c
new file mode 100644
index 0000000..6afab42
--- /dev/null
+++ b/drivers/platform/msm/ipa/ipa_rm_dependency_graph.c
@@ -0,0 +1,208 @@
+/* Copyright (c) 2013, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/slab.h>
+#include "ipa_rm_dependency_graph.h"
+#include "ipa_rm_i.h"
+
+static int ipa_rm_dep_get_index(enum ipa_rm_resource_name resource_name)
+{
+	int resource_index = IPA_RM_INDEX_INVALID;
+	if (IPA_RM_RESORCE_IS_PROD(resource_name))
+		resource_index = ipa_rm_prod_index(resource_name);
+	else if (IPA_RM_RESORCE_IS_CONS(resource_name))
+		resource_index = ipa_rm_cons_index(resource_name);
+
+	return resource_index;
+}
+
+/**
+ * ipa_rm_dep_graph_create() - creates graph
+ * @dep_graph: [out] created dependency graph
+ *
+ * Returns: 0 on success, negative on failure
+ */
+int  ipa_rm_dep_graph_create(struct ipa_rm_dep_graph **dep_graph)
+{
+	int result = 0;
+	*dep_graph = kzalloc(sizeof(**dep_graph), GFP_KERNEL);
+	if (!*dep_graph) {
+		result = -ENOMEM;
+		goto bail;
+	}
+	rwlock_init(&((*dep_graph)->lock));
+bail:
+	return result;
+}
+
+/**
+ * ipa_rm_dep_graph_delete() - destroys the graph
+ * @graph: [in] dependency graph
+ *
+ * Frees all resources.
+ */
+void ipa_rm_dep_graph_delete(struct ipa_rm_dep_graph *graph)
+{
+	int resource_index;
+	if (!graph)
+		return;
+	write_lock(&graph->lock);
+	for (resource_index = 0;
+			resource_index < IPA_RM_RESOURCE_MAX;
+			resource_index++)
+		kfree(graph->resource_table[resource_index]);
+	write_unlock(&graph->lock);
+	memset(graph->resource_table, 0, sizeof(graph->resource_table));
+}
+
+/**
+ * ipa_rm_dep_graph_get_resource() - provides a resource by name
+ * @graph: [in] dependency graph
+ * @resource_name: [in] name of the resource
+ * @resource: [out] resource in case of success
+ *
+ * Returns: 0 on success, negative on failure
+ */
+int ipa_rm_dep_graph_get_resource(
+				struct ipa_rm_dep_graph *graph,
+				enum ipa_rm_resource_name resource_name,
+				struct ipa_rm_resource **resource)
+{
+	int result;
+	int resource_index;
+	if (!graph) {
+		result = -EINVAL;
+		goto bail;
+	}
+	resource_index = ipa_rm_dep_get_index(resource_name);
+	if (resource_index == IPA_RM_INDEX_INVALID) {
+		result = -EINVAL;
+		goto bail;
+	}
+	read_lock(&graph->lock);
+	*resource = graph->resource_table[resource_index];
+	read_unlock(&graph->lock);
+	if (!*resource) {
+		result = -EINVAL;
+		goto bail;
+	}
+	result = 0;
+bail:
+	return result;
+}
+
+/**
+ * ipa_rm_dep_graph_add() - adds resource to graph
+ * @graph: [in] dependency graph
+ * @resource: [in] resource to add
+ *
+ * Returns: 0 on success, negative on failure
+ */
+int ipa_rm_dep_graph_add(struct ipa_rm_dep_graph *graph,
+			 struct ipa_rm_resource *resource)
+{
+	int result = 0;
+	int resource_index;
+	if (!graph || !resource) {
+		result = -EINVAL;
+		goto bail;
+	}
+	resource_index = ipa_rm_dep_get_index(resource->name);
+	if (resource_index == IPA_RM_INDEX_INVALID) {
+		result = -EINVAL;
+		goto bail;
+	}
+	write_lock(&graph->lock);
+	graph->resource_table[resource_index] = resource;
+	write_unlock(&graph->lock);
+bail:
+	return result;
+}
+
+/**
+ * ipa_rm_dep_graph_add_dependency() - adds dependency between
+ *				two nodes in graph
+ * @graph: [in] dependency graph
+ * @resource_name: [in] name of the dependent resource
+ * @depends_on_name: [in] name of the resource it depends on
+ *
+ * Returns: 0 on success, negative on failure
+ */
+int ipa_rm_dep_graph_add_dependency(struct ipa_rm_dep_graph *graph,
+				    enum ipa_rm_resource_name resource_name,
+				    enum ipa_rm_resource_name depends_on_name)
+{
+	struct ipa_rm_resource *dependant = NULL;
+	struct ipa_rm_resource *dependency = NULL;
+	int result;
+	if (!graph ||
+		!IPA_RM_RESORCE_IS_PROD(resource_name) ||
+		!IPA_RM_RESORCE_IS_CONS(depends_on_name)) {
+		result = -EINVAL;
+		goto bail;
+	}
+	if (ipa_rm_dep_graph_get_resource(graph,
+					  resource_name,
+					  &dependant)) {
+		result = -EINVAL;
+		goto bail;
+	}
+	if (ipa_rm_dep_graph_get_resource(graph,
+					depends_on_name,
+					  &dependency)) {
+		result = -EINVAL;
+		goto bail;
+	}
+	result = ipa_rm_resource_add_dependency(dependant, dependency);
+bail:
+	return result;
+}
+
+/**
+ * ipa_rm_dep_graph_delete_dependency() - deletes a dependency between
+ *				two nodes in graph
+ * @graph: [in] dependency graph
+ * @resource_name: [in] name of the dependent resource
+ * @depends_on_name: [in] name of the resource it depends on
+ *
+ * Returns: 0 on success, negative on failure
+ *
+ */
+int ipa_rm_dep_graph_delete_dependency(struct ipa_rm_dep_graph *graph,
+				enum ipa_rm_resource_name resource_name,
+				enum ipa_rm_resource_name depends_on_name)
+{
+	struct ipa_rm_resource *dependant = NULL;
+	struct ipa_rm_resource *dependency = NULL;
+	int result;
+	if (!graph ||
+		!IPA_RM_RESORCE_IS_PROD(resource_name) ||
+		!IPA_RM_RESORCE_IS_CONS(depends_on_name)) {
+		result = -EINVAL;
+		goto bail;
+	}
+	if (ipa_rm_dep_graph_get_resource(graph,
+					  resource_name,
+					  &dependant)) {
+		result = -EINVAL;
+		goto bail;
+	}
+	if (ipa_rm_dep_graph_get_resource(graph,
+					  depends_on_name,
+					  &dependency)) {
+		result = -EINVAL;
+		goto bail;
+	}
+	result = ipa_rm_resource_delete_dependency(dependant, dependency);
+bail:
+	return result;
+}
diff --git a/drivers/platform/msm/ipa/ipa_rm_dependency_graph.h b/drivers/platform/msm/ipa/ipa_rm_dependency_graph.h
new file mode 100644
index 0000000..19d9461
--- /dev/null
+++ b/drivers/platform/msm/ipa/ipa_rm_dependency_graph.h
@@ -0,0 +1,45 @@
+/* Copyright (c) 2013, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _IPA_RM_DEPENDENCY_GRAPH_H_
+#define _IPA_RM_DEPENDENCY_GRAPH_H_
+
+#include <linux/list.h>
+#include <mach/ipa.h>
+#include "ipa_rm_resource.h"
+
+struct ipa_rm_dep_graph {
+	struct ipa_rm_resource *resource_table[IPA_RM_RESOURCE_MAX];
+	rwlock_t lock;
+};
+
+int ipa_rm_dep_graph_get_resource(
+				struct ipa_rm_dep_graph *graph,
+				enum ipa_rm_resource_name name,
+				struct ipa_rm_resource **resource);
+
+int ipa_rm_dep_graph_create(struct ipa_rm_dep_graph **dep_graph);
+
+void ipa_rm_dep_graph_delete(struct ipa_rm_dep_graph *graph);
+
+int ipa_rm_dep_graph_add(struct ipa_rm_dep_graph *graph,
+			 struct ipa_rm_resource *resource);
+
+int ipa_rm_dep_graph_add_dependency(struct ipa_rm_dep_graph *graph,
+				enum ipa_rm_resource_name resource_name,
+				enum ipa_rm_resource_name depends_on_name);
+
+int ipa_rm_dep_graph_delete_dependency(struct ipa_rm_dep_graph *graph,
+				enum ipa_rm_resource_name resource_name,
+				enum ipa_rm_resource_name depends_on_name);
+
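+/*
+ * Usage sketch (illustrative only, error handling trimmed). It assumes
+ * the producer/consumer resources were already created elsewhere and
+ * that usb_prod_res / a2_cons_res are hypothetical
+ * struct ipa_rm_resource pointers:
+ *
+ *	struct ipa_rm_dep_graph *graph;
+ *
+ *	ipa_rm_dep_graph_create(&graph);
+ *	ipa_rm_dep_graph_add(graph, usb_prod_res);
+ *	ipa_rm_dep_graph_add(graph, a2_cons_res);
+ *	ipa_rm_dep_graph_add_dependency(graph,
+ *					IPA_RM_RESOURCE_USB_PROD,
+ *					IPA_RM_RESOURCE_A2_CONS);
+ *	...
+ *	ipa_rm_dep_graph_delete_dependency(graph,
+ *					   IPA_RM_RESOURCE_USB_PROD,
+ *					   IPA_RM_RESOURCE_A2_CONS);
+ *	ipa_rm_dep_graph_delete(graph);
+ */
+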
+#endif /* _IPA_RM_DEPENDENCY_GRAPH_H_ */
diff --git a/drivers/platform/msm/ipa/ipa_rm_i.h b/drivers/platform/msm/ipa/ipa_rm_i.h
new file mode 100644
index 0000000..141a442
--- /dev/null
+++ b/drivers/platform/msm/ipa/ipa_rm_i.h
@@ -0,0 +1,64 @@
+/* Copyright (c) 2013, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _IPA_RM_I_H_
+#define _IPA_RM_I_H_
+
+#include <linux/workqueue.h>
+#include <mach/ipa.h>
+
+#define IPA_RM_RESOURCE_CONS_MAX \
+	(IPA_RM_RESOURCE_MAX - IPA_RM_RESOURCE_PROD_MAX)
+#define IPA_RM_RESORCE_IS_PROD(x) \
+	((x) >= IPA_RM_RESOURCE_PROD && (x) < IPA_RM_RESOURCE_PROD_MAX)
+#define IPA_RM_RESORCE_IS_CONS(x) \
+	((x) >= IPA_RM_RESOURCE_PROD_MAX && (x) < IPA_RM_RESOURCE_MAX)
+#define IPA_RM_INDEX_INVALID	(-1)
+
+int ipa_rm_prod_index(enum ipa_rm_resource_name resource_name);
+int ipa_rm_cons_index(enum ipa_rm_resource_name resource_name);
+
+/**
+ * enum ipa_rm_wq_cmd - workqueue commands
+ */
+enum ipa_rm_wq_cmd {
+	IPA_RM_WQ_NOTIFY_PROD,
+	IPA_RM_WQ_NOTIFY_CONS,
+	IPA_RM_WQ_RESOURCE_CB
+};
+
+/**
+ * struct ipa_rm_wq_work_type - IPA RM workqueue-specific
+ *				work type
+ * @work: work struct
+ * @wq_cmd: command that should be processed in workqueue context
+ * @resource_name: name of the resource on which this work
+ *			should be done
+ * @event: event to notify
+ */
+struct ipa_rm_wq_work_type {
+	struct work_struct		work;
+	enum ipa_rm_wq_cmd		wq_cmd;
+	enum ipa_rm_resource_name	resource_name;
+	enum ipa_rm_event		event;
+};
+
+int ipa_rm_wq_send_cmd(enum ipa_rm_wq_cmd wq_cmd,
+		enum ipa_rm_resource_name resource_name,
+		enum ipa_rm_event event);
+
+int ipa_rm_initialize(void);
+
+void ipa_rm_exit(void);
+
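+/*
+ * Layout assumption behind the macros above (illustrative only): in
+ * enum ipa_rm_resource_name from <mach/ipa.h>, producer names occupy
+ * [IPA_RM_RESOURCE_PROD, IPA_RM_RESOURCE_PROD_MAX) and consumer names
+ * occupy [IPA_RM_RESOURCE_PROD_MAX, IPA_RM_RESOURCE_MAX). For example:
+ *
+ *	IPA_RM_RESORCE_IS_PROD(IPA_RM_RESOURCE_USB_PROD)	-> true
+ *	IPA_RM_RESORCE_IS_CONS(IPA_RM_RESOURCE_USB_CONS)	-> true
+ *	ipa_rm_prod_index(IPA_RM_RESOURCE_USB_CONS)
+ *						-> IPA_RM_INDEX_INVALID
+ */
+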
+#endif /* _IPA_RM_I_H_ */
diff --git a/drivers/platform/msm/ipa/ipa_rm_inactivity_timer.c b/drivers/platform/msm/ipa/ipa_rm_inactivity_timer.c
new file mode 100644
index 0000000..2a3b8d3
--- /dev/null
+++ b/drivers/platform/msm/ipa/ipa_rm_inactivity_timer.c
@@ -0,0 +1,249 @@
+/* Copyright (c) 2013, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/jiffies.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/timer.h>
+#include <linux/unistd.h>
+#include <linux/workqueue.h>
+#include <mach/ipa.h>
+#include "ipa_i.h"
+
+/**
+ * struct ipa_rm_it_private - IPA RM Inactivity Timer private
+ *	data
+ * @initied: indicates if instance was initialized
+ * @lock: spinlock for mutual exclusion
+ * @resource_name: resource name
+ * @work: delayed work object for running the delayed release
+ *	function
+ * @release_in_prog: boolean flag indicating that a resource
+ *			release is scheduled to happen in the future
+ * @jiffies: number of jiffies for timeout
+ */
+struct ipa_rm_it_private {
+	bool initied;
+	enum ipa_rm_resource_name resource_name;
+	spinlock_t lock;
+	struct delayed_work work;
+	bool release_in_prog;
+	unsigned long jiffies;
+};
+
+static struct ipa_rm_it_private ipa_rm_it_handles[IPA_RM_RESOURCE_MAX];
+
+/**
+ * ipa_rm_inactivity_timer_func() - called when the timer expires, in
+ * the context of the shared workqueue. Checks whether the
+ * release_in_prog flag is set and, if so, calls
+ * ipa_rm_release_resource(). release_in_prog is cleared by
+ * ipa_rm_inactivity_timer_request_resource(); in that case this
+ * function does not call ipa_rm_release_resource(), since the
+ * resource needs to remain up.
+ *
+ * @work: work object provided by the work queue
+ *
+ * Return codes:
+ * None
+ */
+static void ipa_rm_inactivity_timer_func(struct work_struct *work)
+{
+	struct ipa_rm_it_private *me = container_of(to_delayed_work(work),
+						    struct ipa_rm_it_private,
+						    work);
+	unsigned long flags;
+
+	IPADBG("%s: timer expired for resource %d!\n", __func__,
+	    me->resource_name);
+
+	/* check that release still need to be performed */
+	spin_lock_irqsave(
+		&ipa_rm_it_handles[me->resource_name].lock, flags);
+	if (ipa_rm_it_handles[me->resource_name].release_in_prog) {
+		IPADBG("%s: calling release_resource on resource %d!\n",
+		     __func__, me->resource_name);
+		ipa_rm_release_resource(me->resource_name);
+		ipa_rm_it_handles[me->resource_name].release_in_prog = false;
+	}
+	spin_unlock_irqrestore(
+		&ipa_rm_it_handles[me->resource_name].lock, flags);
+}
+
+/**
+* ipa_rm_inactivity_timer_init() - Init function for IPA RM
+* inactivity timer. This function shall be called prior to
+* calling any other API of IPA RM inactivity timer.
+*
+* @resource_name: Resource name. @see ipa_rm.h
+* @msecs: time in milliseconds that IPA RM inactivity timer
+* shall wait before calling ipa_rm_release_resource().
+*
+* Return codes:
+* 0: success
+* -EINVAL: invalid parameters
+*/
+int ipa_rm_inactivity_timer_init(enum ipa_rm_resource_name resource_name,
+				 unsigned long msecs)
+{
+	IPADBG("%s: resource %d\n", __func__, resource_name);
+
+	if (resource_name < 0 ||
+	    resource_name >= IPA_RM_RESOURCE_MAX) {
+		IPAERR("%s: Invalid parameter\n", __func__);
+		return -EINVAL;
+	}
+
+	if (ipa_rm_it_handles[resource_name].initied) {
+		IPAERR("%s: resource %d already initialized\n",
+		    __func__, resource_name);
+		return -EINVAL;
+	}
+
+	spin_lock_init(&ipa_rm_it_handles[resource_name].lock);
+	ipa_rm_it_handles[resource_name].resource_name = resource_name;
+	ipa_rm_it_handles[resource_name].jiffies = msecs_to_jiffies(msecs);
+	ipa_rm_it_handles[resource_name].release_in_prog = false;
+
+	INIT_DELAYED_WORK(&ipa_rm_it_handles[resource_name].work,
+			  ipa_rm_inactivity_timer_func);
+	ipa_rm_it_handles[resource_name].initied = 1;
+
+	return 0;
+}
+
+/**
+* ipa_rm_inactivity_timer_destroy() - De-Init function for IPA
+* RM inactivity timer.
+*
+* @resource_name: Resource name. @see ipa_rm.h
+*
+* Return codes:
+* 0: success
+* -EINVAL: invalid parameters
+*/
+int ipa_rm_inactivity_timer_destroy(enum ipa_rm_resource_name resource_name)
+{
+	IPADBG("%s: resource %d\n", __func__, resource_name);
+
+	if (resource_name < 0 ||
+	    resource_name >= IPA_RM_RESOURCE_MAX) {
+		IPAERR("%s: Invalid parameter\n", __func__);
+		return -EINVAL;
+	}
+
+	if (!ipa_rm_it_handles[resource_name].initied) {
+		IPAERR("%s: resource %d not initialized\n",
+		    __func__, resource_name);
+		return -EINVAL;
+	}
+
+	memset(&ipa_rm_it_handles[resource_name], 0,
+	       sizeof(struct ipa_rm_it_private));
+
+	return 0;
+}
+
+/**
+* ipa_rm_inactivity_timer_request_resource() - Same as
+* ipa_rm_request_resource(), with the difference that calling
+* this function will also cancel the inactivity timer, if
+* ipa_rm_inactivity_timer_release_resource() was called earlier.
+*
+* @resource_name: Resource name. @see ipa_rm.h
+*
+* Return codes:
+* 0: success
+* -EINVAL: invalid parameters
+*/
+int ipa_rm_inactivity_timer_request_resource(
+				enum ipa_rm_resource_name resource_name)
+{
+	int ret;
+	unsigned long flags;
+	IPADBG("%s: resource %d\n", __func__, resource_name);
+
+	if (resource_name < 0 ||
+	    resource_name >= IPA_RM_RESOURCE_MAX) {
+		IPAERR("%s: Invalid parameter\n", __func__);
+		return -EINVAL;
+	}
+
+	if (!ipa_rm_it_handles[resource_name].initied) {
+		IPAERR("%s: Not initialized\n", __func__);
+		return -EINVAL;
+	}
+
+	spin_lock_irqsave(&ipa_rm_it_handles[resource_name].lock, flags);
+	cancel_delayed_work(&ipa_rm_it_handles[resource_name].work);
+	ipa_rm_it_handles[resource_name].release_in_prog = false;
+	spin_unlock_irqrestore(&ipa_rm_it_handles[resource_name].lock, flags);
+	ret = ipa_rm_request_resource(resource_name);
+	IPADBG("%s: resource %d: returning %d\n", __func__, resource_name, ret);
+	return ret;
+}
+
+/**
+* ipa_rm_inactivity_timer_release_resource() - Sets the
+* inactivity timer to the timeout set by
+* ipa_rm_inactivity_timer_init(). When the timeout expires, IPA
+* RM inactivity timer will call ipa_rm_release_resource().
+* If a call to ipa_rm_inactivity_timer_request_resource() was
+* made BEFORE the timeout has expired, the timer will be
+* cancelled.
+*
+* @resource_name: Resource name. @see ipa_rm.h
+*
+* Return codes:
+* 0: success
+* -EINVAL: invalid parameters
+*/
+int ipa_rm_inactivity_timer_release_resource(
+				enum ipa_rm_resource_name resource_name)
+{
+	unsigned long flags;
+	IPADBG("%s: resource %d\n", __func__, resource_name);
+
+	if (resource_name < 0 ||
+	    resource_name >= IPA_RM_RESOURCE_MAX) {
+		IPAERR("%s: Invalid parameter\n", __func__);
+		return -EINVAL;
+	}
+
+	if (!ipa_rm_it_handles[resource_name].initied) {
+		IPAERR("%s: Not initialized\n", __func__);
+		return -EINVAL;
+	}
+
+	spin_lock_irqsave(&ipa_rm_it_handles[resource_name].lock, flags);
+	if (ipa_rm_it_handles[resource_name].release_in_prog) {
+		IPADBG("%s: Timer already set, not scheduling again %d\n",
+		    __func__, resource_name);
+		spin_unlock_irqrestore(
+			&ipa_rm_it_handles[resource_name].lock, flags);
+		return 0;
+	}
+	ipa_rm_it_handles[resource_name].release_in_prog = true;
+	spin_unlock_irqrestore(&ipa_rm_it_handles[resource_name].lock, flags);
+
+	IPADBG("%s: setting delayed work\n", __func__);
+	schedule_delayed_work(&ipa_rm_it_handles[resource_name].work,
+			      ipa_rm_it_handles[resource_name].jiffies);
+
+	return 0;
+}
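+
+/*
+ * Usage sketch (illustrative only). It assumes the client already
+ * created the IPA_RM_RESOURCE_USB_PROD producer through the IPA RM
+ * API before using the inactivity timer:
+ *
+ *	ipa_rm_inactivity_timer_init(IPA_RM_RESOURCE_USB_PROD, 1000);
+ *
+ *	// on data activity:
+ *	ipa_rm_inactivity_timer_request_resource(IPA_RM_RESOURCE_USB_PROD);
+ *
+ *	// when idle; the actual ipa_rm_release_resource() call happens
+ *	// ~1000 ms later unless a new request arrives first:
+ *	ipa_rm_inactivity_timer_release_resource(IPA_RM_RESOURCE_USB_PROD);
+ *
+ *	ipa_rm_inactivity_timer_destroy(IPA_RM_RESOURCE_USB_PROD);
+ */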
+
diff --git a/drivers/platform/msm/ipa/ipa_rm_peers_list.c b/drivers/platform/msm/ipa/ipa_rm_peers_list.c
new file mode 100644
index 0000000..55f8239
--- /dev/null
+++ b/drivers/platform/msm/ipa/ipa_rm_peers_list.c
@@ -0,0 +1,247 @@
+/* Copyright (c) 2013, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/slab.h>
+#include "ipa_i.h"
+#include "ipa_rm_i.h"
+#include "ipa_rm_resource.h"
+
+/**
+ * ipa_rm_peers_list_get_resource_index() - resource name to index
+ *	of this resource in corresponding peers list
+ * @resource_name: [in] resource name
+ *
+ * Returns: resource index mapping, IPA_RM_INDEX_INVALID
+ * in case provided resource name isn't contained in enum
+ * ipa_rm_resource_name.
+ *
+ */
+static int ipa_rm_peers_list_get_resource_index(
+		enum ipa_rm_resource_name resource_name)
+{
+	int resource_index = IPA_RM_INDEX_INVALID;
+	if (IPA_RM_RESORCE_IS_PROD(resource_name))
+		resource_index = ipa_rm_prod_index(resource_name);
+	else if (IPA_RM_RESORCE_IS_CONS(resource_name)) {
+		resource_index = ipa_rm_cons_index(resource_name);
+		if (resource_index != IPA_RM_INDEX_INVALID)
+			resource_index =
+				resource_index - IPA_RM_RESOURCE_PROD_MAX;
+	}
+
+	return resource_index;
+}
+
+static bool ipa_rm_peers_list_check_index(int index,
+		struct ipa_rm_peers_list *peers_list)
+{
+	return !(index >= peers_list->max_peers || index < 0);
+}
+
+/**
+ * ipa_rm_peers_list_create() - creates the peers list
+ *
+ * @max_peers: maximum number of peers in new list
+ * @peers_list: [out] newly created peers list
+ *
+ * Returns: 0 in case of SUCCESS, negative otherwise
+ */
+int ipa_rm_peers_list_create(int max_peers,
+		struct ipa_rm_peers_list **peers_list)
+{
+	int result;
+	*peers_list = kzalloc(sizeof(**peers_list), GFP_KERNEL);
+	if (!*peers_list) {
+		result = -ENOMEM;
+		goto bail;
+	}
+	rwlock_init(&(*peers_list)->peers_lock);
+	(*peers_list)->max_peers = max_peers;
+	(*peers_list)->peers = kzalloc((*peers_list)->max_peers *
+				sizeof(struct ipa_rm_resource *), GFP_KERNEL);
+	if (!((*peers_list)->peers)) {
+		result = -ENOMEM;
+		goto list_alloc_fail;
+	}
+	return 0;
+
+list_alloc_fail:
+	kfree(*peers_list);
+bail:
+	return result;
+}
+
+/**
+ * ipa_rm_peers_list_delete() - deletes the peers list
+ *
+ * @peers_list: peers list
+ *
+ */
+void ipa_rm_peers_list_delete(struct ipa_rm_peers_list *peers_list)
+{
+	if (peers_list) {
+		kfree(peers_list->peers);
+		kfree(peers_list);
+	}
+}
+
+/**
+ * ipa_rm_peers_list_remove_peer() - removes peer from the list
+ *
+ * @peers_list: peers list
+ * @resource_name: name of the resource to remove
+ *
+ */
+void ipa_rm_peers_list_remove_peer(
+		struct ipa_rm_peers_list *peers_list,
+		enum ipa_rm_resource_name resource_name)
+{
+	if (!peers_list)
+		return;
+	write_lock(&peers_list->peers_lock);
+	peers_list->peers[ipa_rm_peers_list_get_resource_index(
+			resource_name)] = NULL;
+	peers_list->peers_count--;
+	write_unlock(&peers_list->peers_lock);
+}
+
+/**
+ * ipa_rm_peers_list_add_peer() - adds peer to the list
+ *
+ * @peers_list: peers list
+ * @resource: resource to add
+ *
+ */
+void ipa_rm_peers_list_add_peer(
+		struct ipa_rm_peers_list *peers_list,
+		struct ipa_rm_resource *resource)
+{
+	if (!peers_list || !resource)
+		return;
+	write_lock(&peers_list->peers_lock);
+	peers_list->peers[ipa_rm_peers_list_get_resource_index(
+			resource->name)] =
+			resource;
+	peers_list->peers_count++;
+	write_unlock(&peers_list->peers_lock);
+}
+
+/**
+ * ipa_rm_peers_list_is_empty() - checks
+ *	if resource peers list is empty
+ *
+ * @peers_list: peers list
+ *
+ * Returns: true if the list is empty, false otherwise
+ */
+bool ipa_rm_peers_list_is_empty(struct ipa_rm_peers_list *peers_list)
+{
+	bool result = true;
+	if (!peers_list)
+		goto bail;
+	read_lock(&peers_list->peers_lock);
+	if (peers_list->peers_count > 0)
+		result = false;
+	read_unlock(&peers_list->peers_lock);
+bail:
+	return result;
+}
+
+/**
+ * ipa_rm_peers_list_has_last_peer() - checks
+ *	if resource peers list has exactly one peer
+ *
+ * @peers_list: peers list
+ *
+ * Returns: true if the list has exactly one peer, false otherwise
+ */
+bool ipa_rm_peers_list_has_last_peer(
+		struct ipa_rm_peers_list *peers_list)
+{
+	bool result = false;
+	if (!peers_list)
+		goto bail;
+	read_lock(&peers_list->peers_lock);
+	if (peers_list->peers_count == 1)
+		result = true;
+	read_unlock(&peers_list->peers_lock);
+bail:
+	return result;
+}
+
+/**
+ * ipa_rm_peers_list_check_dependency() - check dependency
+ *	between 2 peer lists
+ * @resource_peers: first peers list
+ * @resource_name: first peers list resource name
+ * @depends_on_peers: second peers list
+ * @depends_on_name: second peers list resource name
+ *
+ * Returns: true if there is dependency, false otherwise
+ *
+ */
+bool ipa_rm_peers_list_check_dependency(
+		struct ipa_rm_peers_list *resource_peers,
+		enum ipa_rm_resource_name resource_name,
+		struct ipa_rm_peers_list *depends_on_peers,
+		enum ipa_rm_resource_name depends_on_name)
+{
+	bool result = false;
+	if (!resource_peers || !depends_on_peers)
+		return result;
+	read_lock(&resource_peers->peers_lock);
+	if (resource_peers->peers[ipa_rm_peers_list_get_resource_index(
+			depends_on_name)] != NULL)
+		result = true;
+	read_unlock(&resource_peers->peers_lock);
+
+	read_lock(&depends_on_peers->peers_lock);
+	if (depends_on_peers->peers[ipa_rm_peers_list_get_resource_index(
+						resource_name)] != NULL)
+		result = true;
+	read_unlock(&depends_on_peers->peers_lock);
+
+	return result;
+}
+
+/**
+ * ipa_rm_peers_list_get_resource() - get resource by
+ *	resource index
+ * @resource_index: resource index
+ * @resource_peers: peers list
+ *
+ * Returns: the resource if found, NULL otherwise
+ */
+struct ipa_rm_resource *ipa_rm_peers_list_get_resource(int resource_index,
+		struct ipa_rm_peers_list *resource_peers)
+{
+	struct ipa_rm_resource *result = NULL;
+	if (!ipa_rm_peers_list_check_index(resource_index, resource_peers))
+		goto bail;
+	read_lock(&resource_peers->peers_lock);
+	result = resource_peers->peers[resource_index];
+	read_unlock(&resource_peers->peers_lock);
+bail:
+	return result;
+}
+
+/**
+ * ipa_rm_peers_list_get_size() - get peers list size
+ *
+ * @peers_list: peers list
+ *
+ * Returns: the size of the peers list
+ */
+int ipa_rm_peers_list_get_size(struct ipa_rm_peers_list *peers_list)
+{
+	return peers_list->max_peers;
+}
diff --git a/drivers/platform/msm/ipa/ipa_rm_peers_list.h b/drivers/platform/msm/ipa/ipa_rm_peers_list.h
new file mode 100644
index 0000000..f8fd1ca
--- /dev/null
+++ b/drivers/platform/msm/ipa/ipa_rm_peers_list.h
@@ -0,0 +1,55 @@
+/* Copyright (c) 2013, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _IPA_RM_PEERS_LIST_H_
+#define _IPA_RM_PEERS_LIST_H_
+
+#include "ipa_rm_resource.h"
+
+/**
+ * struct ipa_rm_peers_list - IPA RM resource peers list
+ * @peers: the list of references to resources dependent on this resource
+ *          in case of producer or list of dependencies in case of consumer
+ * @max_peers: maximum number of peers for this resource
+ * @peers_count: actual number of peers for this resource
+ * @peers_lock: RW lock for peers container
+ */
+struct ipa_rm_peers_list {
+	struct ipa_rm_resource		**peers;
+	int				max_peers;
+	int				peers_count;
+	rwlock_t			peers_lock;
+};
+
+int ipa_rm_peers_list_create(int max_peers,
+		struct ipa_rm_peers_list **peers_list);
+void ipa_rm_peers_list_delete(struct ipa_rm_peers_list *peers_list);
+void ipa_rm_peers_list_remove_peer(
+		struct ipa_rm_peers_list *peers_list,
+		enum ipa_rm_resource_name resource_name);
+void ipa_rm_peers_list_add_peer(
+		struct ipa_rm_peers_list *peers_list,
+		struct ipa_rm_resource *resource);
+bool ipa_rm_peers_list_check_dependency(
+		struct ipa_rm_peers_list *resource_peers,
+		enum ipa_rm_resource_name resource_name,
+		struct ipa_rm_peers_list *depends_on_peers,
+		enum ipa_rm_resource_name depends_on_name);
+struct ipa_rm_resource *ipa_rm_peers_list_get_resource(int resource_index,
+		struct ipa_rm_peers_list *peers_list);
+int ipa_rm_peers_list_get_size(struct ipa_rm_peers_list *peers_list);
+bool ipa_rm_peers_list_is_empty(struct ipa_rm_peers_list *peers_list);
+bool ipa_rm_peers_list_has_last_peer(
+		struct ipa_rm_peers_list *peers_list);
+
+
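+/*
+ * Usage sketch (illustrative only, error handling trimmed). A producer
+ * sizes its peers list by the number of possible consumers
+ * (IPA_RM_RESOURCE_CONS_MAX from ipa_rm_i.h) and vice versa;
+ * usb_cons_res below is a hypothetical struct ipa_rm_resource pointer:
+ *
+ *	struct ipa_rm_peers_list *peers;
+ *
+ *	ipa_rm_peers_list_create(IPA_RM_RESOURCE_CONS_MAX, &peers);
+ *	ipa_rm_peers_list_add_peer(peers, usb_cons_res);
+ *	if (ipa_rm_peers_list_has_last_peer(peers))
+ *		pr_debug("exactly one peer left\n");
+ *	ipa_rm_peers_list_remove_peer(peers, usb_cons_res->name);
+ *	ipa_rm_peers_list_delete(peers);
+ */
+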
+#endif /* _IPA_RM_PEERS_LIST_H_ */
diff --git a/drivers/platform/msm/ipa/ipa_rm_resource.c b/drivers/platform/msm/ipa/ipa_rm_resource.c
new file mode 100644
index 0000000..3ba8e84
--- /dev/null
+++ b/drivers/platform/msm/ipa/ipa_rm_resource.c
@@ -0,0 +1,809 @@
+/* Copyright (c) 2013, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/slab.h>
+#include "ipa_i.h"
+#include "ipa_rm_resource.h"
+#include "ipa_rm_i.h"
+
+/**
+ * ipa_rm_prod_index() - producer name to producer index mapping
+ * @resource_name: [in] resource name (should be of producer)
+ *
+ * Returns: resource index mapping, IPA_RM_INDEX_INVALID
+ *	in case provided resource name isn't contained
+ *	in enum ipa_rm_resource_name or is not a producer.
+ *
+ */
+int ipa_rm_prod_index(enum ipa_rm_resource_name resource_name)
+{
+	int result = resource_name;
+	switch (resource_name) {
+	case IPA_RM_RESOURCE_BRIDGE_PROD:
+	case IPA_RM_RESOURCE_A2_PROD:
+	case IPA_RM_RESOURCE_USB_PROD:
+	case IPA_RM_RESOURCE_HSIC_PROD:
+	case IPA_RM_RESOURCE_STD_ECM_PROD:
+	case IPA_RM_RESOURCE_WWAN_0_PROD:
+	case IPA_RM_RESOURCE_WWAN_1_PROD:
+	case IPA_RM_RESOURCE_WWAN_2_PROD:
+	case IPA_RM_RESOURCE_WWAN_3_PROD:
+	case IPA_RM_RESOURCE_WWAN_4_PROD:
+	case IPA_RM_RESOURCE_WWAN_5_PROD:
+	case IPA_RM_RESOURCE_WWAN_6_PROD:
+	case IPA_RM_RESOURCE_WWAN_7_PROD:
+	case IPA_RM_RESOURCE_WLAN_PROD:
+		break;
+	default:
+		result = IPA_RM_INDEX_INVALID;
+		break;
+	}
+	return result;
+}
+
+/**
+ * ipa_rm_cons_index() - consumer name to consumer index mapping
+ * @resource_name: [in] resource name (should be of consumer)
+ *
+ * Returns: resource index mapping, IPA_RM_INDEX_INVALID
+ *	in case provided resource name isn't contained
+ *	in enum ipa_rm_resource_name or is not a consumer.
+ *
+ */
+int ipa_rm_cons_index(enum ipa_rm_resource_name resource_name)
+{
+	int result = resource_name;
+	switch (resource_name) {
+	case IPA_RM_RESOURCE_A2_CONS:
+	case IPA_RM_RESOURCE_USB_CONS:
+	case IPA_RM_RESOURCE_HSIC_CONS:
+		break;
+	default:
+		result = IPA_RM_INDEX_INVALID;
+		break;
+	}
+	return result;
+}
+
+static int ipa_rm_resource_consumer_request(
+		struct ipa_rm_resource_cons *consumer)
+{
+	int result = 0;
+	int driver_result;
+	unsigned long flags;
+	IPADBG("IPA RM ::ipa_rm_resource_consumer_request ENTER\n");
+	spin_lock_irqsave(&consumer->resource.state_lock, flags);
+	switch (consumer->resource.state) {
+	case IPA_RM_RELEASED:
+	case IPA_RM_RELEASE_IN_PROGRESS:
+	{
+		enum ipa_rm_resource_state prev_state =
+						consumer->resource.state;
+		consumer->resource.state = IPA_RM_REQUEST_IN_PROGRESS;
+		spin_unlock_irqrestore(&consumer->resource.state_lock, flags);
+		driver_result = consumer->request_resource();
+		spin_lock_irqsave(&consumer->resource.state_lock, flags);
+		if (driver_result == 0)
+			consumer->resource.state = IPA_RM_GRANTED;
+		else if (driver_result != -EINPROGRESS) {
+			consumer->resource.state = prev_state;
+			result = driver_result;
+			goto bail;
+		}
+		result = driver_result;
+		break;
+	}
+	case IPA_RM_GRANTED:
+		break;
+	case IPA_RM_REQUEST_IN_PROGRESS:
+		result = -EINPROGRESS;
+		break;
+	default:
+		result = -EPERM;
+		goto bail;
+	}
+	consumer->usage_count++;
+bail:
+	spin_unlock_irqrestore(&consumer->resource.state_lock, flags);
+	IPADBG("IPA RM ::ipa_rm_resource_consumer_request EXIT [%d]\n", result);
+	return result;
+}
+
+static int ipa_rm_resource_consumer_release(
+		struct ipa_rm_resource_cons *consumer)
+{
+	int result = 0;
+	int driver_result;
+	unsigned long flags;
+	enum ipa_rm_resource_state save_state;
+	IPADBG("IPA RM ::ipa_rm_resource_consumer_release ENTER\n");
+	spin_lock_irqsave(&consumer->resource.state_lock, flags);
+	switch (consumer->resource.state) {
+	case IPA_RM_RELEASED:
+		break;
+	case IPA_RM_GRANTED:
+	case IPA_RM_REQUEST_IN_PROGRESS:
+		if (consumer->usage_count > 0)
+			consumer->usage_count--;
+		if (consumer->usage_count == 0) {
+			save_state = consumer->resource.state;
+			consumer->resource.state = IPA_RM_RELEASE_IN_PROGRESS;
+			spin_unlock_irqrestore(&consumer->resource.state_lock,
+					flags);
+			driver_result = consumer->release_resource();
+			spin_lock_irqsave(&consumer->resource.state_lock,
+					flags);
+			if (driver_result == 0)
+				consumer->resource.state = IPA_RM_RELEASED;
+			else if (driver_result != -EINPROGRESS)
+				consumer->resource.state = save_state;
+			result = driver_result;
+		}
+		break;
+	case IPA_RM_RELEASE_IN_PROGRESS:
+		if (consumer->usage_count > 0)
+			consumer->usage_count--;
+		result = -EINPROGRESS;
+		break;
+	default:
+		result = -EPERM;
+		goto bail;
+	}
+bail:
+	spin_unlock_irqrestore(&consumer->resource.state_lock, flags);
+	IPADBG("IPA RM ::ipa_rm_resource_consumer_release EXIT [%d]\n", result);
+	return result;
+}
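+
+/*
+ * Consumer state machine driven by the two helpers above (illustrative
+ * summary only; the driver callbacks are the request_resource() /
+ * release_resource() functions supplied at create time):
+ *
+ *	RELEASED --request, cb returns 0-------------> GRANTED
+ *	RELEASED --request, cb returns -EINPROGRESS--> REQUEST_IN_PROGRESS
+ *	GRANTED --release (usage_count hits 0),
+ *		  cb returns 0-----------------------> RELEASED
+ *	GRANTED --release (usage_count hits 0),
+ *		  cb returns -EINPROGRESS------------> RELEASE_IN_PROGRESS
+ *
+ * The *_IN_PROGRESS states are normally left when
+ * ipa_rm_resource_consumer_handle_cb() delivers the matching event.
+ */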
+
+/**
+ * ipa_rm_resource_producer_notify_clients() - notify
+ *	all registered clients of given producer
+ * @producer: producer
+ * @event: event to notify
+ */
+void ipa_rm_resource_producer_notify_clients(
+				struct ipa_rm_resource_prod *producer,
+				enum ipa_rm_event event)
+{
+	struct ipa_rm_notification_info *reg_info, *reg_info_cloned;
+	struct list_head *pos, *q;
+	LIST_HEAD(cloned_list);
+	read_lock(&producer->event_listeners_lock);
+	list_for_each(pos, &(producer->event_listeners)) {
+		reg_info = list_entry(pos,
+					struct ipa_rm_notification_info,
+					link);
+		reg_info_cloned = kzalloc(sizeof(*reg_info_cloned), GFP_ATOMIC);
+		if (!reg_info_cloned)
+			goto clone_list_failed;
+		reg_info_cloned->reg_params.notify_cb =
+				reg_info->reg_params.notify_cb;
+		reg_info_cloned->reg_params.user_data =
+				reg_info->reg_params.user_data;
+		list_add(&reg_info_cloned->link, &cloned_list);
+	}
+	read_unlock(&producer->event_listeners_lock);
+	list_for_each_safe(pos, q, &cloned_list) {
+		reg_info = list_entry(pos,
+					struct ipa_rm_notification_info,
+					link);
+		reg_info->reg_params.notify_cb(
+				reg_info->reg_params.user_data,
+				event,
+				0);
+		list_del(pos);
+		kfree(reg_info);
+	}
+	return;
+clone_list_failed:
+	read_unlock(&producer->event_listeners_lock);
+}
+
+static int ipa_rm_resource_producer_create(struct ipa_rm_resource **resource,
+		struct ipa_rm_resource_prod **producer,
+		struct ipa_rm_create_params *create_params,
+		int *max_peers)
+{
+	int result = 0;
+	*producer = kzalloc(sizeof(**producer), GFP_KERNEL);
+	if (*producer == NULL) {
+		result = -ENOMEM;
+		goto bail;
+	}
+	rwlock_init(&(*producer)->event_listeners_lock);
+	INIT_LIST_HEAD(&((*producer)->event_listeners));
+	result = ipa_rm_resource_producer_register(*producer,
+			&(create_params->reg_params));
+	if (result)
+		goto register_fail;
+	(*resource) = (struct ipa_rm_resource *) (*producer);
+	(*resource)->type = IPA_RM_PRODUCER;
+	*max_peers = IPA_RM_RESOURCE_CONS_MAX;
+	goto bail;
+register_fail:
+	kfree(*producer);
+bail:
+	return result;
+}
+
+static void ipa_rm_resource_producer_delete(
+				struct ipa_rm_resource_prod *producer)
+{
+	struct ipa_rm_notification_info *reg_info;
+	struct list_head *pos, *q;
+	write_lock(&producer->event_listeners_lock);
+	list_for_each_safe(pos, q, &(producer->event_listeners)) {
+		reg_info = list_entry(pos,
+				struct ipa_rm_notification_info,
+				link);
+		list_del(pos);
+		kfree(reg_info);
+	}
+	write_unlock(&producer->event_listeners_lock);
+}
+
+static int ipa_rm_resource_consumer_create(struct ipa_rm_resource **resource,
+		struct ipa_rm_resource_cons **consumer,
+		struct ipa_rm_create_params *create_params,
+		int *max_peers)
+{
+	int result = 0;
+	*consumer = kzalloc(sizeof(**consumer), GFP_KERNEL);
+	if (*consumer == NULL) {
+		result = -ENOMEM;
+		goto bail;
+	}
+	(*consumer)->request_resource = create_params->request_resource;
+	(*consumer)->release_resource = create_params->release_resource;
+	(*resource) = (struct ipa_rm_resource *) (*consumer);
+	(*resource)->type = IPA_RM_CONSUMER;
+	*max_peers = IPA_RM_RESOURCE_PROD_MAX;
+bail:
+	return result;
+}
+
+/**
+ * ipa_rm_resource_create() - creates resource
+ * @create_params: [in] parameters needed
+ *			for resource initialization with IPA RM
+ * @resource: [out] created resource
+ *
+ * Returns: 0 on success, negative on failure
+ */
+int ipa_rm_resource_create(
+		struct ipa_rm_create_params *create_params,
+		struct ipa_rm_resource **resource)
+{
+	struct ipa_rm_resource_cons *consumer;
+	struct ipa_rm_resource_prod *producer;
+	int max_peers;
+	int result = 0;
+
+	if (!create_params) {
+		result = -EINVAL;
+		goto bail;
+	}
+	if (IPA_RM_RESORCE_IS_PROD(create_params->name)) {
+		result = ipa_rm_resource_producer_create(resource,
+				&producer,
+				create_params,
+				&max_peers);
+		if (result)
+			goto bail;
+	} else if (IPA_RM_RESORCE_IS_CONS(create_params->name)) {
+		result = ipa_rm_resource_consumer_create(resource,
+				&consumer,
+				create_params,
+				&max_peers);
+		if (result)
+			goto bail;
+	} else {
+		result = -EPERM;
+		goto bail;
+	}
+	result = ipa_rm_peers_list_create(max_peers,
+			&((*resource)->peers_list));
+	if (result)
+		goto peers_alloc_fail;
+	(*resource)->name = create_params->name;
+	(*resource)->state = IPA_RM_RELEASED;
+	spin_lock_init(&((*resource)->state_lock));
+	goto bail;
+peers_alloc_fail:
+	ipa_rm_resource_delete(*resource);
+bail:
+	return result;
+}
+
+/**
+ * ipa_rm_resource_delete() - deletes resource
+ * @resource: [in] resource to be deleted
+ */
+void ipa_rm_resource_delete(struct ipa_rm_resource *resource)
+{
+	if (!resource)
+		return;
+	if (resource->peers_list)
+		ipa_rm_peers_list_delete(resource->peers_list);
+	if (resource->type == IPA_RM_PRODUCER) {
+		ipa_rm_resource_producer_delete(
+				(struct ipa_rm_resource_prod *) resource);
+		kfree((struct ipa_rm_resource_prod *) resource);
+	} else
+		kfree((struct ipa_rm_resource_cons *) resource);
+}
+
+/**
+ * ipa_rm_resource_producer_register() - register resource
+ * @producer: [in] producer resource
+ * @reg_params: [in] registration parameters
+ *
+ * Returns: 0 on success, negative on failure
+ *
+ * Producer resource is expected for this call.
+ *
+ */
+int ipa_rm_resource_producer_register(struct ipa_rm_resource_prod *producer,
+		struct ipa_rm_register_params *reg_params)
+{
+	int result = 0;
+	struct ipa_rm_notification_info *reg_info;
+	struct list_head *pos;
+	if (!producer || !reg_params) {
+		result = -EPERM;
+		goto bail;
+	}
+	read_lock(&producer->event_listeners_lock);
+	list_for_each(pos, &(producer->event_listeners)) {
+		reg_info = list_entry(pos,
+					struct ipa_rm_notification_info,
+					link);
+		if (reg_info->reg_params.notify_cb ==
+						reg_params->notify_cb) {
+			result = -EPERM;
+			read_unlock(&producer->event_listeners_lock);
+			goto bail;
+		}
+
+	}
+	read_unlock(&producer->event_listeners_lock);
+	reg_info = kzalloc(sizeof(*reg_info), GFP_KERNEL);
+	if (reg_info == NULL) {
+		result = -ENOMEM;
+		goto bail;
+	}
+	reg_info->reg_params.user_data = reg_params->user_data;
+	reg_info->reg_params.notify_cb = reg_params->notify_cb;
+	INIT_LIST_HEAD(&reg_info->link);
+	write_lock(&producer->event_listeners_lock);
+	list_add(&reg_info->link, &producer->event_listeners);
+	write_unlock(&producer->event_listeners_lock);
+bail:
+	return result;
+}
+
+/**
+ * ipa_rm_resource_producer_deregister() - deregister resource
+ * @producer: [in] producer resource
+ * @reg_params: [in] registration parameters
+ *
+ * Returns: 0 on success, negative on failure
+ *
+ * Producer resource is expected for this call.
+ * This function deletes only a single instance of
+ * the registration info.
+ *
+ */
+int ipa_rm_resource_producer_deregister(struct ipa_rm_resource_prod *producer,
+		struct ipa_rm_register_params *reg_params)
+{
+	int result = -EINVAL;
+	struct ipa_rm_notification_info *reg_info;
+	struct list_head *pos, *q;
+	if (!producer || !reg_params)
+		return -EINVAL;
+	write_lock(&producer->event_listeners_lock);
+	list_for_each_safe(pos, q, &(producer->event_listeners)) {
+		reg_info = list_entry(pos,
+				struct ipa_rm_notification_info,
+				link);
+		if (reg_info->reg_params.notify_cb ==
+						reg_params->notify_cb) {
+			list_del(pos);
+			kfree(reg_info);
+			result = 0;
+			goto bail;
+		}
+
+	}
+bail:
+	write_unlock(&producer->event_listeners_lock);
+	return result;
+}
+
+/**
+ * ipa_rm_resource_add_dependency() - add dependency between two
+ *				given resources
+ * @resource: [in] dependent resource (producer)
+ * @depends_on: [in] resource it depends on (consumer)
+ *
+ * Returns: 0 on success, negative on failure
+ */
+int ipa_rm_resource_add_dependency(struct ipa_rm_resource *resource,
+				   struct ipa_rm_resource *depends_on)
+{
+	int result = 0;
+	unsigned long flags;
+	int consumer_result;
+	if (!resource || !depends_on)
+		return -EINVAL;
+	if (ipa_rm_peers_list_check_dependency(resource->peers_list,
+			resource->name,
+			depends_on->peers_list,
+			depends_on->name))
+		return -EINVAL;
+	ipa_rm_peers_list_add_peer(resource->peers_list, depends_on);
+	ipa_rm_peers_list_add_peer(depends_on->peers_list, resource);
+	spin_lock_irqsave(&resource->state_lock, flags);
+	switch (resource->state) {
+	case IPA_RM_RELEASED:
+	case IPA_RM_RELEASE_IN_PROGRESS:
+		break;
+	case IPA_RM_GRANTED:
+	case IPA_RM_REQUEST_IN_PROGRESS:
+	{
+		enum ipa_rm_resource_state prev_state = resource->state;
+		resource->state = IPA_RM_REQUEST_IN_PROGRESS;
+		((struct ipa_rm_resource_prod *)
+					resource)->pending_request++;
+		spin_unlock_irqrestore(&resource->state_lock, flags);
+		consumer_result = ipa_rm_resource_consumer_request(
+				(struct ipa_rm_resource_cons *)depends_on);
+		spin_lock_irqsave(&resource->state_lock, flags);
+		if (consumer_result != -EINPROGRESS) {
+			resource->state = prev_state;
+			((struct ipa_rm_resource_prod *)
+					resource)->pending_request--;
+		}
+		result = consumer_result;
+		break;
+	}
+	default:
+		result = -EPERM;
+		goto bail;
+	}
+bail:
+	spin_unlock_irqrestore(&resource->state_lock, flags);
+	IPADBG("IPA RM ipa_rm_resource_add_dependency name[%d]count[%d]EXIT\n",
+			resource->name, resource->peers_list->peers_count);
+	IPADBG("IPA RM ipa_rm_resource_add_dependency name[%d]count[%d]EXIT\n",
+			depends_on->name, depends_on->peers_list->peers_count);
+	return result;
+}
+
+/**
+ * ipa_rm_resource_delete_dependency() - deletes dependency between two
+ *				given resources
+ * @resource: [in] dependent resource (producer)
+ * @depends_on: [in] resource it depends on (consumer)
+ *
+ * Returns: 0 on success, negative on failure
+ * -EINPROGRESS is returned in case this is the last dependency
+ * of the given resource and the IPA RM client should expect a RELEASED cb
+ */
+int ipa_rm_resource_delete_dependency(struct ipa_rm_resource *resource,
+				   struct ipa_rm_resource *depends_on)
+{
+	int result = 0;
+	unsigned long flags;
+	if (!resource || !depends_on)
+		return -EINVAL;
+	if (ipa_rm_peers_list_check_dependency(resource->peers_list,
+			resource->name,
+			depends_on->peers_list,
+			depends_on->name))
+		return -EINVAL;
+	spin_lock_irqsave(&resource->state_lock, flags);
+	switch (resource->state) {
+	case IPA_RM_RELEASED:
+	case IPA_RM_GRANTED:
+		break;
+	case IPA_RM_RELEASE_IN_PROGRESS:
+		if (((struct ipa_rm_resource_prod *)
+				resource)->pending_release > 0)
+			((struct ipa_rm_resource_prod *)
+					resource)->pending_release--;
+		break;
+	case IPA_RM_REQUEST_IN_PROGRESS:
+		if (((struct ipa_rm_resource_prod *)
+				resource)->pending_request > 0)
+			((struct ipa_rm_resource_prod *)
+					resource)->pending_request--;
+		break;
+	default:
+		result = -EINVAL;
+		spin_unlock_irqrestore(&resource->state_lock, flags);
+		goto bail;
+	}
+	spin_unlock_irqrestore(&resource->state_lock, flags);
+	(void) ipa_rm_resource_consumer_release(
+			(struct ipa_rm_resource_cons *)depends_on);
+	if (ipa_rm_peers_list_has_last_peer(resource->peers_list)) {
+		(void) ipa_rm_wq_send_cmd(IPA_RM_WQ_NOTIFY_PROD,
+				resource->name,
+				IPA_RM_RESOURCE_RELEASED);
+		result = -EINPROGRESS;
+	}
+	ipa_rm_peers_list_remove_peer(resource->peers_list,
+			depends_on->name);
+	ipa_rm_peers_list_remove_peer(depends_on->peers_list,
+			resource->name);
+bail:
+	return result;
+}
+
+/**
+ * ipa_rm_resource_producer_request() - producer resource request
+ * @producer: [in] producer
+ *
+ * Returns: 0 on success, negative on failure
+ */
+int ipa_rm_resource_producer_request(struct ipa_rm_resource_prod *producer)
+{
+	int peers_index;
+	int result = 0;
+	unsigned long flags;
+	struct ipa_rm_resource *consumer;
+	int consumer_result;
+	IPADBG("IPA RM ::ipa_rm_resource_producer_request [%d] ENTER\n",
+			producer->resource.name);
+	if (ipa_rm_peers_list_is_empty(producer->resource.peers_list)) {
+		spin_lock_irqsave(&producer->resource.state_lock, flags);
+		producer->resource.state = IPA_RM_GRANTED;
+		spin_unlock_irqrestore(&producer->resource.state_lock, flags);
+		return 0;
+	}
+	spin_lock_irqsave(&producer->resource.state_lock, flags);
+	IPADBG("IPA RM ::ipa_rm_resource_producer_request state [%d]\n",
+			producer->resource.state);
+	switch (producer->resource.state) {
+	case IPA_RM_RELEASED:
+	case IPA_RM_RELEASE_IN_PROGRESS:
+		producer->resource.state = IPA_RM_REQUEST_IN_PROGRESS;
+		break;
+	case IPA_RM_GRANTED:
+		goto unlock_and_bail;
+	case IPA_RM_REQUEST_IN_PROGRESS:
+		result = -EINPROGRESS;
+		goto unlock_and_bail;
+	default:
+		result = -EINVAL;
+		goto unlock_and_bail;
+	}
+	producer->pending_request = 0;
+	spin_unlock_irqrestore(&producer->resource.state_lock, flags);
+	for (peers_index = 0;
+		peers_index < ipa_rm_peers_list_get_size(
+				producer->resource.peers_list);
+		peers_index++) {
+		consumer = ipa_rm_peers_list_get_resource(peers_index,
+				producer->resource.peers_list);
+		if (consumer) {
+			spin_lock_irqsave(
+				&producer->resource.state_lock, flags);
+			producer->pending_request++;
+			spin_unlock_irqrestore(
+				&producer->resource.state_lock, flags);
+			consumer_result = ipa_rm_resource_consumer_request(
+				(struct ipa_rm_resource_cons *)consumer);
+			if (consumer_result == -EINPROGRESS) {
+				result = -EINPROGRESS;
+			} else {
+				spin_lock_irqsave(
+					&producer->resource.state_lock, flags);
+				producer->pending_request--;
+				spin_unlock_irqrestore(
+					&producer->resource.state_lock, flags);
+				if (consumer_result != 0) {
+					result = consumer_result;
+					goto bail;
+				}
+			}
+		}
+	}
+	spin_lock_irqsave(&producer->resource.state_lock, flags);
+	if (producer->pending_request == 0)
+		producer->resource.state = IPA_RM_GRANTED;
+	spin_unlock_irqrestore(&producer->resource.state_lock, flags);
+	return result;
+unlock_and_bail:
+	spin_unlock_irqrestore(&producer->resource.state_lock, flags);
+bail:
+	IPADBG("IPA RM ::ipa_rm_resource_producer_request EXIT[%d]\n", result);
+	return result;
+}
+
+/**
+ * ipa_rm_resource_producer_release() - producer resource release
+ * @producer: [in] producer resource
+ *
+ * Returns: 0 on success, negative on failure
+ *
+ */
+int ipa_rm_resource_producer_release(struct ipa_rm_resource_prod *producer)
+{
+	int peers_index;
+	int result = 0;
+	unsigned long flags;
+	struct ipa_rm_resource *consumer;
+	int consumer_result;
+	IPADBG("IPA RM ::ipa_rm_resource_producer_release ENTER\n");
+	if (ipa_rm_peers_list_is_empty(producer->resource.peers_list)) {
+		spin_lock_irqsave(&producer->resource.state_lock, flags);
+		producer->resource.state = IPA_RM_RELEASED;
+		spin_unlock_irqrestore(&producer->resource.state_lock, flags);
+		return 0;
+	}
+	spin_lock_irqsave(&producer->resource.state_lock, flags);
+	switch (producer->resource.state) {
+	case IPA_RM_RELEASED:
+		goto bail;
+	case IPA_RM_GRANTED:
+	case IPA_RM_REQUEST_IN_PROGRESS:
+		producer->resource.state = IPA_RM_RELEASE_IN_PROGRESS;
+		break;
+	case IPA_RM_RELEASE_IN_PROGRESS:
+		result = -EINPROGRESS;
+		goto bail;
+	default:
+		result = -EPERM;
+		goto bail;
+	}
+	producer->pending_release = 0;
+	spin_unlock_irqrestore(&producer->resource.state_lock, flags);
+	for (peers_index = 0;
+		peers_index < ipa_rm_peers_list_get_size(
+				producer->resource.peers_list);
+		peers_index++) {
+		consumer = ipa_rm_peers_list_get_resource(peers_index,
+				producer->resource.peers_list);
+		if (consumer) {
+			spin_lock_irqsave(
+				&producer->resource.state_lock, flags);
+			producer->pending_release++;
+			spin_unlock_irqrestore(
+				&producer->resource.state_lock, flags);
+			consumer_result = ipa_rm_resource_consumer_release(
+				(struct ipa_rm_resource_cons *)consumer);
+			if (consumer_result == -EINPROGRESS) {
+				result = -EINPROGRESS;
+			} else {
+				spin_lock_irqsave(
+					&producer->resource.state_lock, flags);
+				producer->pending_release--;
+				spin_unlock_irqrestore(
+					&producer->resource.state_lock, flags);
+			}
+		}
+	}
+	spin_lock_irqsave(&producer->resource.state_lock, flags);
+	if (producer->pending_release == 0)
+		producer->resource.state = IPA_RM_RELEASED;
+	spin_unlock_irqrestore(&producer->resource.state_lock, flags);
+	return result;
+bail:
+	spin_unlock_irqrestore(&producer->resource.state_lock, flags);
+	IPADBG("IPA RM ::ipa_rm_resource_producer_release EXIT[%d]\n", result);
+	return result;
+}
+
+static void ipa_rm_resource_producer_handle_cb(
+		struct ipa_rm_resource_prod *producer,
+		enum ipa_rm_event event)
+{
+	unsigned long flags;
+	spin_lock_irqsave(&producer->resource.state_lock, flags);
+	switch (producer->resource.state) {
+	case IPA_RM_REQUEST_IN_PROGRESS:
+		if (event != IPA_RM_RESOURCE_GRANTED)
+			goto unlock_and_bail;
+		if (producer->pending_request > 0) {
+			producer->pending_request--;
+			if (producer->pending_request == 0) {
+				producer->resource.state =
+						IPA_RM_GRANTED;
+				spin_unlock_irqrestore(
+					&producer->resource.state_lock, flags);
+				ipa_rm_resource_producer_notify_clients(
+						producer,
+						IPA_RM_RESOURCE_GRANTED);
+				goto bail;
+			}
+		}
+		break;
+	case IPA_RM_RELEASE_IN_PROGRESS:
+		if (event != IPA_RM_RESOURCE_RELEASED)
+			goto unlock_and_bail;
+		if (producer->pending_release > 0) {
+			producer->pending_release--;
+			if (producer->pending_release == 0) {
+				producer->resource.state =
+						IPA_RM_RELEASED;
+				spin_unlock_irqrestore(
+					&producer->resource.state_lock, flags);
+				ipa_rm_resource_producer_notify_clients(
+						producer,
+						IPA_RM_RESOURCE_RELEASED);
+				goto bail;
+			}
+		}
+		break;
+	case IPA_RM_GRANTED:
+	case IPA_RM_RELEASED:
+	default:
+		goto unlock_and_bail;
+	}
+unlock_and_bail:
+	spin_unlock_irqrestore(&producer->resource.state_lock, flags);
+bail:
+	return;
+}
+
+/**
+ * ipa_rm_resource_consumer_handle_cb() - propagates resource
+ *	notification to all dependent producers
+ * @consumer: [in] notifying resource
+ * @event: [in] event to propagate
+ *
+ */
+void ipa_rm_resource_consumer_handle_cb(struct ipa_rm_resource_cons *consumer,
+				enum ipa_rm_event event)
+{
+	int peers_index;
+	struct ipa_rm_resource *producer;
+	unsigned long flags;
+	if (!consumer)
+		return;
+	spin_lock_irqsave(&consumer->resource.state_lock, flags);
+	switch (consumer->resource.state) {
+	case IPA_RM_REQUEST_IN_PROGRESS:
+		if (event == IPA_RM_RESOURCE_RELEASED)
+			goto bail;
+		consumer->resource.state = IPA_RM_GRANTED;
+		break;
+	case IPA_RM_RELEASE_IN_PROGRESS:
+		if (event == IPA_RM_RESOURCE_GRANTED)
+			goto bail;
+		consumer->resource.state = IPA_RM_RELEASED;
+		break;
+	case IPA_RM_GRANTED:
+	case IPA_RM_RELEASED:
+	default:
+		goto bail;
+	}
+	spin_unlock_irqrestore(&consumer->resource.state_lock, flags);
+	for (peers_index = 0;
+		peers_index < ipa_rm_peers_list_get_size(
+				consumer->resource.peers_list);
+		peers_index++) {
+		producer = ipa_rm_peers_list_get_resource(peers_index,
+				consumer->resource.peers_list);
+		if (producer)
+			ipa_rm_resource_producer_handle_cb(
+					(struct ipa_rm_resource_prod *)
+						producer,
+						event);
+	}
+	return;
+bail:
+	spin_unlock_irqrestore(&consumer->resource.state_lock, flags);
+	return;
+}
diff --git a/drivers/platform/msm/ipa/ipa_rm_resource.h b/drivers/platform/msm/ipa/ipa_rm_resource.h
new file mode 100644
index 0000000..b9c2e91
--- /dev/null
+++ b/drivers/platform/msm/ipa/ipa_rm_resource.h
@@ -0,0 +1,127 @@
+/* Copyright (c) 2013, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _IPA_RM_RESOURCE_H_
+#define _IPA_RM_RESOURCE_H_
+
+#include <linux/list.h>
+#include <mach/ipa.h>
+#include "ipa_rm_peers_list.h"
+
+/**
+ * enum ipa_rm_resource_state - resource state
+ */
+enum ipa_rm_resource_state {
+	IPA_RM_RELEASED,
+	IPA_RM_REQUEST_IN_PROGRESS,
+	IPA_RM_GRANTED,
+	IPA_RM_RELEASE_IN_PROGRESS
+};
+
+/**
+ * enum ipa_rm_resource_type - IPA resource manager resource type
+ */
+enum ipa_rm_resource_type {
+	IPA_RM_PRODUCER,
+	IPA_RM_CONSUMER
+};
+
+/**
+ * struct ipa_rm_notification_info - notification information
+ *				of IPA RM client
+ * @reg_params: registration parameters
+ * @link: link to the list of all registered clients information
+ */
+struct ipa_rm_notification_info {
+	struct ipa_rm_register_params	reg_params;
+	struct list_head		link;
+};
+
+/**
+ * struct ipa_rm_resource - IPA RM resource
+ * @name: name identifying resource
+ * @state: state of the resource
+ * @state_lock: lock for all resource state related variables
+ * @peers_list: list of the peers of the resource
+ */
+struct ipa_rm_resource {
+	enum ipa_rm_resource_name	name;
+	enum ipa_rm_resource_type	type;
+	enum ipa_rm_resource_state	state;
+	spinlock_t			state_lock;
+	struct ipa_rm_peers_list	*peers_list;
+};
+
+/**
+ * struct ipa_rm_resource_cons - IPA RM consumer
+ * @resource: resource
+ * @usage_count: number of producers in GRANTED / REQUESTED state
+ *		using this consumer
+ * @request_resource: function which should be called to request resource
+ *			from resource manager
+ * @release_resource: function which should be called to release resource
+ *			from resource manager
+ * Add new fields after @resource only.
+ */
+struct ipa_rm_resource_cons {
+	struct ipa_rm_resource resource;
+	int usage_count;
+	int (*request_resource)(void);
+	int (*release_resource)(void);
+};
+
+/**
+ * struct ipa_rm_resource_prod - IPA RM producer
+ * @resource: resource
+ * @event_listeners: clients registered with this producer
+ *		for notifications in resource state
+ * @event_listeners_lock: RW lock protecting the event listeners list
+ * Add new fields after @resource only.
+ */
+struct ipa_rm_resource_prod {
+	struct ipa_rm_resource	resource;
+	struct list_head	event_listeners;
+	rwlock_t		event_listeners_lock;
+	int			pending_request;
+	int			pending_release;
+};
+
+int ipa_rm_resource_create(
+		struct ipa_rm_create_params *create_params,
+		struct ipa_rm_resource **resource);
+
+void ipa_rm_resource_delete(struct ipa_rm_resource *resource);
+
+int ipa_rm_resource_producer_register(struct ipa_rm_resource_prod *producer,
+				struct ipa_rm_register_params *reg_params);
+
+int ipa_rm_resource_producer_deregister(struct ipa_rm_resource_prod *producer,
+				struct ipa_rm_register_params *reg_params);
+
+int ipa_rm_resource_add_dependency(struct ipa_rm_resource *resource,
+				   struct ipa_rm_resource *depends_on);
+
+int ipa_rm_resource_delete_dependency(struct ipa_rm_resource *resource,
+				      struct ipa_rm_resource *depends_on);
+
+int ipa_rm_resource_producer_request(struct ipa_rm_resource_prod *producer);
+
+int ipa_rm_resource_producer_release(struct ipa_rm_resource_prod *producer);
+
+void ipa_rm_resource_consumer_handle_cb(struct ipa_rm_resource_cons *consumer,
+				enum ipa_rm_event event);
+
+void ipa_rm_resource_producer_notify_clients(
+				struct ipa_rm_resource_prod *producer,
+				enum ipa_rm_event event);
+
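+/*
+ * Creation sketch for a consumer resource (illustrative only;
+ * usb_cons_request / usb_cons_release are hypothetical driver
+ * callbacks that return 0 when done or -EINPROGRESS when the
+ * operation completes asynchronously, and graph is a hypothetical
+ * struct ipa_rm_dep_graph pointer, see ipa_rm_dependency_graph.h):
+ *
+ *	struct ipa_rm_create_params params = {
+ *		.name = IPA_RM_RESOURCE_USB_CONS,
+ *		.request_resource = usb_cons_request,
+ *		.release_resource = usb_cons_release,
+ *	};
+ *	struct ipa_rm_resource *resource;
+ *
+ *	if (!ipa_rm_resource_create(&params, &resource))
+ *		ipa_rm_dep_graph_add(graph, resource);
+ */
+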
+#endif /* _IPA_RM_RESOURCE_H_ */
diff --git a/drivers/platform/msm/ipa/rmnet_bridge.c b/drivers/platform/msm/ipa/rmnet_bridge.c
index e5c7ec2..696b363 100644
--- a/drivers/platform/msm/ipa/rmnet_bridge.c
+++ b/drivers/platform/msm/ipa/rmnet_bridge.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -16,7 +16,6 @@
 #include <mach/bam_dmux.h>
 #include <mach/ipa.h>
 #include <mach/sps.h>
-#include "a2_service.h"
 
 static struct rmnet_bridge_cb_type {
 	u32 producer_handle;
diff --git a/drivers/platform/msm/ipa/teth_bridge.c b/drivers/platform/msm/ipa/teth_bridge.c
new file mode 100644
index 0000000..76e2eee
--- /dev/null
+++ b/drivers/platform/msm/ipa/teth_bridge.c
@@ -0,0 +1,1483 @@
+/* Copyright (c) 2013, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/completion.h>
+#include <linux/debugfs.h>
+#include <linux/export.h>
+#include <linux/fs.h>
+#include <linux/if_ether.h>
+#include <linux/ioctl.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/msm_ipa.h>
+#include <linux/mutex.h>
+#include <linux/skbuff.h>
+#include <linux/types.h>
+#include <mach/bam_dmux.h>
+#include <mach/ipa.h>
+#include <mach/sps.h>
+#include "ipa_i.h"
+
+#define TETH_BRIDGE_DRV_NAME "ipa_tethering_bridge"
+
+#ifdef TETH_DEBUG
+#define TETH_DBG(fmt, args...) \
+	pr_debug(TETH_BRIDGE_DRV_NAME " %s:%d " fmt, \
+		 __func__, __LINE__, ## args)
+#define TETH_DBG_FUNC_ENTRY() \
+	pr_debug(TETH_BRIDGE_DRV_NAME " %s:%d ENTRY\n", __func__, __LINE__)
+#define TETH_DBG_FUNC_EXIT() \
+	pr_debug(TETH_BRIDGE_DRV_NAME " %s:%d EXIT\n", __func__, __LINE__)
+#else
+#define TETH_DBG(fmt, args...)
+#define TETH_DBG_FUNC_ENTRY()
+#define TETH_DBG_FUNC_EXIT()
+#endif
+
+#define TETH_ERR(fmt, args...) \
+	pr_err(TETH_BRIDGE_DRV_NAME " %s:%d " fmt, __func__, __LINE__, ## args)
+
+#define USB_ETH_HDR_NAME_IPV4 "usb_bridge_ipv4"
+#define USB_ETH_HDR_NAME_IPV6 "usb_bridge_ipv6"
+#define A2_ETH_HDR_NAME_IPV4  "a2_bridge_ipv4"
+#define A2_ETH_HDR_NAME_IPV6  "a2_bridge_ipv6"
+
+#define USB_TO_A2_RT_TBL_NAME_IPV4 "usb_a2_rt_tbl_ipv4"
+#define A2_TO_USB_RT_TBL_NAME_IPV4 "a2_usb_rt_tbl_ipv4"
+#define USB_TO_A2_RT_TBL_NAME_IPV6 "usb_a2_rt_tbl_ipv6"
+#define A2_TO_USB_RT_TBL_NAME_IPV6 "a2_usb_rt_tbl_ipv6"
+
+#define MBIM_HEADER_NAME "mbim_header"
+#define TETH_DEFAULT_AGGR_TIME_LIMIT 1
+
+#define ETHERTYPE_IPV4 0x0800
+#define ETHERTYPE_IPV6 0x86DD
+
+struct mac_addresses_type {
+	u8 host_pc_mac_addr[ETH_ALEN];
+	bool host_pc_mac_addr_known;
+	u8 device_mac_addr[ETH_ALEN];
+	bool device_mac_addr_known;
+};
+
+struct teth_bridge_ctx {
+	struct class *class;
+	dev_t dev_num;
+	struct device *dev;
+	struct cdev cdev;
+	u32 usb_ipa_pipe_hdl;
+	u32 ipa_usb_pipe_hdl;
+	u32 a2_ipa_pipe_hdl;
+	u32 ipa_a2_pipe_hdl;
+	bool is_connected;
+	enum teth_link_protocol_type link_protocol;
+	struct mac_addresses_type mac_addresses;
+	bool is_hw_bridge_complete;
+	struct teth_aggr_params aggr_params;
+	bool aggr_params_known;
+	enum teth_tethering_mode tethering_mode;
+	struct completion is_bridge_prod_up;
+	struct completion is_bridge_prod_down;
+	struct work_struct comp_hw_bridge_work;
+	bool comp_hw_bridge_in_progress;
+	struct teth_aggr_capabilities *aggr_caps;
+};
+
+static struct teth_bridge_ctx *teth_ctx;
+
+#ifdef CONFIG_DEBUG_FS
+#define TETH_MAX_MSG_LEN 512
+static char dbg_buff[TETH_MAX_MSG_LEN];
+#endif
+
+static int add_eth_hdrs(char *hdr_name_ipv4, char *hdr_name_ipv6,
+			u8 *src_mac_addr, u8 *dst_mac_addr)
+{
+	int res;
+	struct ipa_ioc_add_hdr *hdrs;
+	struct ethhdr hdr_ipv4;
+	struct ethhdr hdr_ipv6;
+
+	TETH_DBG_FUNC_ENTRY();
+	memcpy(hdr_ipv4.h_source, src_mac_addr, ETH_ALEN);
+	memcpy(hdr_ipv4.h_dest, dst_mac_addr, ETH_ALEN);
+	hdr_ipv4.h_proto = htons(ETHERTYPE_IPV4);
+
+	memcpy(hdr_ipv6.h_source, src_mac_addr, ETH_ALEN);
+	memcpy(hdr_ipv6.h_dest, dst_mac_addr, ETH_ALEN);
+	hdr_ipv6.h_proto = htons(ETHERTYPE_IPV6);
+
+	/* Add headers to the header insertion tables */
+	hdrs = kzalloc(sizeof(struct ipa_ioc_add_hdr) +
+		       2 * sizeof(struct ipa_hdr_add), GFP_KERNEL);
+	if (hdrs == NULL) {
+		TETH_ERR("Failed allocating memory for headers !\n");
+		return -ENOMEM;
+	}
+
+	hdrs->commit = 0;
+	hdrs->num_hdrs = 2;
+
+	/* Ethernet IPv4 header */
+	strlcpy(hdrs->hdr[0].name, hdr_name_ipv4, IPA_RESOURCE_NAME_MAX);
+	hdrs->hdr[0].hdr_len = ETH_HLEN;
+	memcpy(hdrs->hdr[0].hdr, &hdr_ipv4, ETH_HLEN);
+
+	/* Ethernet IPv6 header */
+	strlcpy(hdrs->hdr[1].name, hdr_name_ipv6, IPA_RESOURCE_NAME_MAX);
+	hdrs->hdr[1].hdr_len = ETH_HLEN;
+	memcpy(hdrs->hdr[1].hdr, &hdr_ipv6, ETH_HLEN);
+
+	res = ipa_add_hdr(hdrs);
+	if (res || hdrs->hdr[0].status || hdrs->hdr[1].status)
+		TETH_ERR("Header insertion failed\n");
+	kfree(hdrs);
+	TETH_DBG_FUNC_EXIT();
+
+	return res;
+}
+
+static int configure_ipa_header_block_internal(u32 usb_ipa_hdr_len,
+					       u32 a2_ipa_hdr_len,
+					       u32 ipa_usb_hdr_len,
+					       u32 ipa_a2_hdr_len)
+{
+	struct ipa_ep_cfg_hdr hdr_cfg;
+	int res;
+
+	TETH_DBG_FUNC_ENTRY();
+	/* Configure header removal for the USB->IPA pipe and A2->IPA pipe */
+	memset(&hdr_cfg, 0, sizeof(hdr_cfg));
+	hdr_cfg.hdr_len = usb_ipa_hdr_len;
+	res = ipa_cfg_ep_hdr(teth_ctx->usb_ipa_pipe_hdl, &hdr_cfg);
+	if (res) {
+		TETH_ERR("Header removal config for USB->IPA pipe failed\n");
+		goto bail;
+	}
+
+	hdr_cfg.hdr_len = a2_ipa_hdr_len;
+	res = ipa_cfg_ep_hdr(teth_ctx->a2_ipa_pipe_hdl, &hdr_cfg);
+	if (res) {
+		TETH_ERR("Header removal config for A2->IPA pipe failed\n");
+		goto bail;
+	}
+
+	/* Configure header insertion for the IPA->USB pipe and IPA->A2 pipe */
+	hdr_cfg.hdr_len = ipa_usb_hdr_len;
+	res = ipa_cfg_ep_hdr(teth_ctx->ipa_usb_pipe_hdl, &hdr_cfg);
+	if (res) {
+		TETH_ERR("Header insertion config for IPA->USB pipe failed\n");
+		goto bail;
+	}
+
+	hdr_cfg.hdr_len = ipa_a2_hdr_len;
+	res = ipa_cfg_ep_hdr(teth_ctx->ipa_a2_pipe_hdl, &hdr_cfg);
+	if (res) {
+		TETH_ERR("Header insertion config for IPA->A2 pipe failed\n");
+		goto bail;
+	}
+	TETH_DBG_FUNC_EXIT();
+
+bail:
+	return res;
+}
+
+static int add_mbim_hdr(void)
+{
+	int res;
+	struct ipa_ioc_add_hdr *mbim_hdr;
+	u8 mbim_stream_id = 0;
+
+	TETH_DBG_FUNC_ENTRY();
+	mbim_hdr = kzalloc(sizeof(struct ipa_ioc_add_hdr) +
+			   sizeof(struct ipa_hdr_add),
+			   GFP_KERNEL);
+	if (!mbim_hdr) {
+		TETH_ERR("Failed allocating memory for MBIM header\n");
+		return -ENOMEM;
+	}
+
+	mbim_hdr->commit = 0;
+	mbim_hdr->num_hdrs = 1;
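+	/*
+	 * The MBIM header is a single-byte stream ID (0 here); the IPA
+	 * header-insertion block prepends it to packets routed through
+	 * the IPA->USB table when MBIM aggregation is used.
+	 */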
+	strlcpy(mbim_hdr->hdr[0].name, MBIM_HEADER_NAME, IPA_RESOURCE_NAME_MAX);
+	memcpy(mbim_hdr->hdr[0].hdr, &mbim_stream_id, sizeof(u8));
+	mbim_hdr->hdr[0].hdr_len = sizeof(u8);
+	mbim_hdr->hdr[0].is_partial = false;
+	res = ipa_add_hdr(mbim_hdr);
+	if (res || mbim_hdr->hdr[0].status) {
+		TETH_ERR("Failed adding MBIM header\n");
+		res = -EFAULT;
+	} else {
+		TETH_DBG("Added MBIM stream ID header\n");
+	}
+	kfree(mbim_hdr);
+	TETH_DBG_FUNC_EXIT();
+
+	return res;
+}
+
+static int configure_ipa_header_block(void)
+{
+	int res;
+	u32 hdr_len = 0;
+	u32 ipa_usb_hdr_len = 0;
+
+	TETH_DBG_FUNC_ENTRY();
+	if (teth_ctx->link_protocol == TETH_LINK_PROTOCOL_IP) {
+		/*
+		 * Create a new header for MBIM stream ID and associate it with
+		 * the IPA->USB routing table
+		 */
+		if (teth_ctx->aggr_params.dl.aggr_prot ==
+					TETH_AGGR_PROTOCOL_MBIM) {
+			ipa_usb_hdr_len = 1;
+			res = add_mbim_hdr();
+			if (res) {
+				TETH_ERR("Failed adding MBIM header\n");
+				goto bail;
+			}
+		}
+	} else if (teth_ctx->link_protocol == TETH_LINK_PROTOCOL_ETHERNET) {
+		/* Add a header entry for USB */
+		res = add_eth_hdrs(USB_ETH_HDR_NAME_IPV4,
+				   USB_ETH_HDR_NAME_IPV6,
+				   teth_ctx->mac_addresses.host_pc_mac_addr,
+				   teth_ctx->mac_addresses.device_mac_addr);
+		if (res) {
+			TETH_ERR("Failed adding USB Ethernet header\n");
+			goto bail;
+		}
+		TETH_DBG("Added USB Ethernet headers (IPv4 / IPv6)\n");
+
+		/* Add a header entry for A2 */
+		res = add_eth_hdrs(A2_ETH_HDR_NAME_IPV4,
+				   A2_ETH_HDR_NAME_IPV6,
+				   teth_ctx->mac_addresses.device_mac_addr,
+				   teth_ctx->mac_addresses.host_pc_mac_addr);
+		if (res) {
+			TETH_ERR("Failed adding A2 Ethernet header\n");
+			goto bail;
+		}
+		TETH_DBG("Added A2 Ethernet headers (IPv4 / IPv6)\n");
+
+		hdr_len = ETH_HLEN;
+		ipa_usb_hdr_len = ETH_HLEN;
+	}
+
+	res = configure_ipa_header_block_internal(hdr_len,
+						  hdr_len,
+						  ipa_usb_hdr_len,
+						  hdr_len);
+	if (res) {
+		TETH_ERR("Configuration of header removal/insertion failed\n");
+		goto bail;
+	}
+
+	res = ipa_commit_hdr();
+	if (res) {
+		TETH_ERR("Failed committing headers\n");
+		goto bail;
+	}
+	TETH_DBG_FUNC_EXIT();
+
+bail:
+	return res;
+}
+
+static int configure_routing_by_ip(char *hdr_name,
+			    char *rt_tbl_name,
+			    enum ipa_client_type dst,
+			    enum ipa_ip_type ip_address_family)
+{
+
+	struct ipa_ioc_add_rt_rule *rt_rule;
+	struct ipa_ioc_get_hdr hdr_info;
+	int res;
+
+	TETH_DBG_FUNC_ENTRY();
+	/* Get the header handle */
+	memset(&hdr_info, 0, sizeof(hdr_info));
+	strlcpy(hdr_info.name, hdr_name, IPA_RESOURCE_NAME_MAX);
+	ipa_get_hdr(&hdr_info);
+
+	rt_rule = kzalloc(sizeof(struct ipa_ioc_add_rt_rule) +
+			  1 * sizeof(struct ipa_rt_rule_add),
+			  GFP_KERNEL);
+	if (!rt_rule) {
+		TETH_ERR("Memory allocation failure");
+		return -ENOMEM;
+	}
+
+	/* Match all, do not commit to HW*/
+	rt_rule->commit = 0;
+	rt_rule->num_rules = 1;
+	rt_rule->ip = ip_address_family;
+	strlcpy(rt_rule->rt_tbl_name, rt_tbl_name, IPA_RESOURCE_NAME_MAX);
+	rt_rule->rules[0].rule.dst = dst;
+	rt_rule->rules[0].rule.hdr_hdl = hdr_info.hdl;
+	rt_rule->rules[0].rule.attrib.attrib_mask = 0; /* Match all */
+	res = ipa_add_rt_rule(rt_rule);
+	if (res || rt_rule->rules[0].status)
+		TETH_ERR("Failed adding routing rule\n");
+	kfree(rt_rule);
+	TETH_DBG_FUNC_EXIT();
+
+	return res;
+}
+
+static int configure_routing(char *hdr_name_ipv4,
+			     char *rt_tbl_name_ipv4,
+			     char *hdr_name_ipv6,
+			     char *rt_tbl_name_ipv6,
+			     enum ipa_client_type dst)
+{
+	int res;
+
+	TETH_DBG_FUNC_ENTRY();
+	/* Configure IPv4 routing table */
+	res = configure_routing_by_ip(hdr_name_ipv4,
+				      rt_tbl_name_ipv4,
+				      dst,
+				      IPA_IP_v4);
+	if (res) {
+		TETH_ERR("Failed adding IPv4 routing table\n");
+		goto bail;
+	}
+
+	/* Configure IPv6 routing table */
+	res = configure_routing_by_ip(hdr_name_ipv6,
+				      rt_tbl_name_ipv6,
+				      dst,
+				      IPA_IP_v6);
+	if (res) {
+		TETH_ERR("Failed adding IPv6 routing table\n");
+		goto bail;
+	}
+	TETH_DBG_FUNC_EXIT();
+
+bail:
+	return res;
+}
+
+static int configure_ipa_routing_block(void)
+{
+	int res;
+	char hdr_name_ipv4[IPA_RESOURCE_NAME_MAX];
+	char hdr_name_ipv6[IPA_RESOURCE_NAME_MAX];
+
+	TETH_DBG_FUNC_ENTRY();
+	hdr_name_ipv4[0] = '\0';
+	hdr_name_ipv6[0] = '\0';
+
+	/* Configure USB -> A2 routing table */
+	if (teth_ctx->link_protocol == TETH_LINK_PROTOCOL_ETHERNET) {
+		strlcpy(hdr_name_ipv4,
+			A2_ETH_HDR_NAME_IPV4,
+			IPA_RESOURCE_NAME_MAX);
+		strlcpy(hdr_name_ipv6,
+			A2_ETH_HDR_NAME_IPV6,
+			IPA_RESOURCE_NAME_MAX);
+	}
+	res = configure_routing(hdr_name_ipv4,
+				USB_TO_A2_RT_TBL_NAME_IPV4,
+				hdr_name_ipv6,
+				USB_TO_A2_RT_TBL_NAME_IPV6,
+				IPA_CLIENT_A2_TETHERED_CONS);
+	if (res) {
+		TETH_ERR("USB to A2 routing block configuration failed\n");
+		goto bail;
+	}
+
+	/* Configure A2 -> USB routing table */
+	if (teth_ctx->link_protocol == TETH_LINK_PROTOCOL_ETHERNET) {
+		strlcpy(hdr_name_ipv4,
+			USB_ETH_HDR_NAME_IPV4,
+			IPA_RESOURCE_NAME_MAX);
+		strlcpy(hdr_name_ipv6,
+			USB_ETH_HDR_NAME_IPV6,
+			IPA_RESOURCE_NAME_MAX);
+	} else if (teth_ctx->aggr_params.dl.aggr_prot ==
+						TETH_AGGR_PROTOCOL_MBIM) {
+		strlcpy(hdr_name_ipv4,
+			MBIM_HEADER_NAME,
+			IPA_RESOURCE_NAME_MAX);
+		strlcpy(hdr_name_ipv6,
+			MBIM_HEADER_NAME,
+			IPA_RESOURCE_NAME_MAX);
+	}
+	res = configure_routing(hdr_name_ipv4,
+				A2_TO_USB_RT_TBL_NAME_IPV4,
+				hdr_name_ipv6,
+				A2_TO_USB_RT_TBL_NAME_IPV6,
+				IPA_CLIENT_USB_CONS);
+	if (res) {
+		TETH_ERR("A2 to USB routing block configuration failed\n");
+		goto bail;
+	}
+
+	/* Commit all the changes to HW in one shot */
+	res = ipa_commit_rt(IPA_IP_v4);
+	if (res) {
+		TETH_ERR("Failed committing IPv4 routing tables\n");
+		goto bail;
+	}
+	res = ipa_commit_rt(IPA_IP_v6);
+	if (res) {
+		TETH_ERR("Failed committing IPv6 routing tables\n");
+		goto bail;
+	}
+	TETH_DBG_FUNC_EXIT();
+
+bail:
+	return res;
+}
+
+static int configure_filtering_by_ip(char *rt_tbl_name,
+			      enum ipa_client_type src,
+			      enum ipa_ip_type ip_address_family)
+{
+	struct ipa_ioc_add_flt_rule *flt_tbl;
+	struct ipa_ioc_get_rt_tbl rt_tbl_info;
+	int res;
+
+	TETH_DBG_FUNC_ENTRY();
+	/* Get the needed routing table handle */
+	rt_tbl_info.ip = ip_address_family;
+	strlcpy(rt_tbl_info.name, rt_tbl_name, IPA_RESOURCE_NAME_MAX);
+	res = ipa_get_rt_tbl(&rt_tbl_info);
+	if (res) {
+		TETH_ERR("Failed getting routing table handle\n");
+		goto bail;
+	}
+
+	flt_tbl = kzalloc(sizeof(struct ipa_ioc_add_flt_rule) +
+			  1 * sizeof(struct ipa_flt_rule_add), GFP_KERNEL);
+	if (!flt_tbl) {
+		TETH_ERR("Filtering table memory allocation failure\n");
+		return -ENOMEM;
+	}
+
+	flt_tbl->commit = 0;
+	flt_tbl->ep = src;
+	flt_tbl->global = 0;
+	flt_tbl->ip = ip_address_family;
+	flt_tbl->num_rules = 1;
+	flt_tbl->rules[0].rule.action = IPA_PASS_TO_ROUTING;
+	flt_tbl->rules[0].rule.rt_tbl_hdl = rt_tbl_info.hdl;
+	flt_tbl->rules[0].rule.attrib.attrib_mask = 0; /* Match all */
+
+	res = ipa_add_flt_rule(flt_tbl);
+	if (res || flt_tbl->rules[0].status)
+		TETH_ERR("Failed adding filtering table\n");
+	kfree(flt_tbl);
+	TETH_DBG_FUNC_EXIT();
+
+bail:
+	return res;
+}
+
+static int configure_filtering(char *rt_tbl_name_ipv4,
+			char *rt_tbl_name_ipv6,
+			enum ipa_client_type src)
+{
+	int res;
+
+	TETH_DBG_FUNC_ENTRY();
+	res = configure_filtering_by_ip(rt_tbl_name_ipv4, src, IPA_IP_v4);
+	if (res) {
+		TETH_ERR("Failed adding IPv4 filtering table\n");
+		goto bail;
+	}
+
+	res = configure_filtering_by_ip(rt_tbl_name_ipv6, src, IPA_IP_v6);
+	if (res) {
+		TETH_ERR("Failed adding IPv6 filtering table\n");
+		goto bail;
+	}
+	TETH_DBG_FUNC_EXIT();
+
+bail:
+	return res;
+}
+
+static int configure_ipa_filtering_block(void)
+{
+	int res;
+
+	TETH_DBG_FUNC_ENTRY();
+	/* Filter all traffic coming from USB to A2 */
+	res = configure_filtering(USB_TO_A2_RT_TBL_NAME_IPV4,
+				  USB_TO_A2_RT_TBL_NAME_IPV6,
+				  IPA_CLIENT_USB_PROD);
+	if (res) {
+		TETH_ERR("USB_PROD ep filtering configuration failed\n");
+		goto bail;
+	}
+
+	/* Filter all traffic coming from A2 to USB */
+	res = configure_filtering(A2_TO_USB_RT_TBL_NAME_IPV4,
+				  A2_TO_USB_RT_TBL_NAME_IPV6,
+				  IPA_CLIENT_A2_TETHERED_PROD);
+	if (res) {
+		TETH_ERR("A2_PROD filtering configuration failed\n");
+		goto bail;
+	}
+
+	/* Commit all the changes to HW in one shot */
+	res = ipa_commit_flt(IPA_IP_v4);
+	if (res) {
+		TETH_ERR("Failed committing IPv4 filtering tables\n");
+		goto bail;
+	}
+	res = ipa_commit_flt(IPA_IP_v6);
+	if (res) {
+		TETH_ERR("Failed committing IPv6 filtering tables\n");
+		goto bail;
+	}
+	TETH_DBG_FUNC_EXIT();
+
+bail:
+	return res;
+}
+
+static int prepare_ipa_aggr_struct(
+	const struct teth_aggr_params_link *teth_aggr_params,
+	struct ipa_ep_cfg_aggr *ipa_aggr_params,
+	bool client_is_prod)
+{
+	TETH_DBG_FUNC_ENTRY();
+	memset(ipa_aggr_params, 0, sizeof(*ipa_aggr_params));
+
+	switch (teth_aggr_params->aggr_prot) {
+	case TETH_AGGR_PROTOCOL_NONE:
+		ipa_aggr_params->aggr_en = IPA_BYPASS_AGGR;
+		break;
+	case TETH_AGGR_PROTOCOL_MBIM:
+		 ipa_aggr_params->aggr = IPA_MBIM_16;
+		 ipa_aggr_params->aggr_en = (client_is_prod) ?
+			 IPA_ENABLE_DEAGGR : IPA_ENABLE_AGGR;
+		 break;
+	case TETH_AGGR_PROTOCOL_TLP:
+		ipa_aggr_params->aggr = IPA_TLP;
+		ipa_aggr_params->aggr_en = (client_is_prod) ?
+			IPA_ENABLE_DEAGGR : IPA_ENABLE_AGGR;
+		break;
+	default:
+		TETH_ERR("Unsupported aggregation protocol\n");
+		return -EFAULT;
+	}
+
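+	/* The IPA aggregation byte limit is programmed in units of 1 KB */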
+	ipa_aggr_params->aggr_byte_limit =
+		teth_aggr_params->max_transfer_size_byte / 1024;
+	ipa_aggr_params->aggr_time_limit = TETH_DEFAULT_AGGR_TIME_LIMIT;
+	TETH_DBG_FUNC_EXIT();
+
+	return 0;
+}
+
+static int teth_set_aggr_per_ep(
+	const struct teth_aggr_params_link *teth_aggr_params,
+	bool client_is_prod,
+	u32 pipe_hdl)
+{
+	struct ipa_ep_cfg_aggr agg_params;
+	struct ipa_ep_cfg_hdr hdr_params;
+	int res;
+
+	TETH_DBG_FUNC_ENTRY();
+	res = prepare_ipa_aggr_struct(teth_aggr_params,
+				      &agg_params,
+				      client_is_prod);
+	if (res) {
+		TETH_ERR("prepare_ipa_aggr_struct() failed\n");
+		goto bail;
+	}
+
+	res = ipa_cfg_ep_aggr(pipe_hdl, &agg_params);
+	if (res) {
+		TETH_ERR("ipa_cfg_ep_aggr() failed\n");
+		goto bail;
+	}
+
+	if (!client_is_prod) {
+		memset(&hdr_params, 0, sizeof(hdr_params));
+		hdr_params.hdr_len = 1;
+		res = ipa_cfg_ep_hdr(pipe_hdl, &hdr_params);
+		if (res) {
+			TETH_ERR("ipa_cfg_ep_hdr() failed\n");
+			goto bail;
+		}
+	}
+	TETH_DBG_FUNC_EXIT();
+
+bail:
+	return res;
+}
+
+static void aggr_prot_to_str(enum teth_aggr_protocol_type aggr_prot,
+			     char *buff,
+			     uint buff_size)
+{
+	switch (aggr_prot) {
+	case TETH_AGGR_PROTOCOL_NONE:
+		strlcpy(buff, "NONE", buff_size);
+		break;
+	case TETH_AGGR_PROTOCOL_MBIM:
+		strlcpy(buff, "MBIM", buff_size);
+		break;
+	case TETH_AGGR_PROTOCOL_TLP:
+		strlcpy(buff, "TLP", buff_size);
+		break;
+	default:
+		strlcpy(buff, "ERROR", buff_size);
+		break;
+	}
+}
+
+static int teth_set_aggregation(void)
+{
+	int res;
+	char aggr_prot_str[20];
+
+	TETH_DBG_FUNC_ENTRY();
+	if (teth_ctx->aggr_params.ul.aggr_prot == TETH_AGGR_PROTOCOL_MBIM ||
+	    teth_ctx->aggr_params.dl.aggr_prot == TETH_AGGR_PROTOCOL_MBIM) {
+		res = ipa_set_aggr_mode(IPA_MBIM);
+		if (res) {
+			TETH_ERR("ipa_set_aggr_mode() failed\n");
+			goto bail;
+		}
+		res = ipa_set_single_ndp_per_mbim(false);
+		if (res) {
+			TETH_ERR("ipa_set_single_ndp_per_mbim() failed\n");
+			goto bail;
+		}
+	}
+
+	aggr_prot_to_str(teth_ctx->aggr_params.ul.aggr_prot,
+			 aggr_prot_str,
+			 sizeof(aggr_prot_str)-1);
+	TETH_DBG("Setting %s aggregation on UL\n", aggr_prot_str);
+	aggr_prot_to_str(teth_ctx->aggr_params.dl.aggr_prot,
+			 aggr_prot_str,
+			 sizeof(aggr_prot_str)-1);
+	TETH_DBG("Setting %s aggregation on DL\n", aggr_prot_str);
+
+	/* Configure aggregation on UL producer (USB->IPA) */
+	res = teth_set_aggr_per_ep(&teth_ctx->aggr_params.ul,
+				   true,
+				   teth_ctx->usb_ipa_pipe_hdl);
+	if (res) {
+		TETH_ERR("teth_set_aggr_per_ep() failed\n");
+		goto bail;
+	}
+
+	/* Configure aggregation on DL consumer (IPA->USB) */
+	res = teth_set_aggr_per_ep(&teth_ctx->aggr_params.dl,
+				   false,
+				   teth_ctx->ipa_usb_pipe_hdl);
+	if (res) {
+		TETH_ERR("teth_set_aggr_per_ep() failed\n");
+		goto bail;
+	}
+	TETH_DBG_FUNC_EXIT();
+bail:
+	return res;
+}
+
+static void complete_hw_bridge(struct work_struct *work)
+{
+	int res;
+	static DEFINE_MUTEX(f_lock);
+
+	mutex_lock(&f_lock);
+
+	TETH_DBG_FUNC_ENTRY();
+	TETH_DBG("Completing HW bridge in %s mode\n",
+		 (teth_ctx->link_protocol == TETH_LINK_PROTOCOL_ETHERNET) ?
+		 "ETHERNET" :
+		 "IP");
+
+	res = teth_set_aggregation();
+	if (res) {
+		TETH_ERR("Failed setting aggregation params\n");
+		goto bail;
+	}
+
+	/*
+	 * Reset the Header, Routing and Filtering blocks.
+	 * Resetting the Header block will also reset the other blocks.
+	 * This reset is not committed to HW.
+	 */
+	res = ipa_reset_hdr();
+	if (res) {
+		TETH_ERR("Failed resetting IPA\n");
+		goto bail;
+	}
+
+	res = configure_ipa_header_block();
+	if (res) {
+		TETH_ERR("Configuration of IPA header block Failed\n");
+		goto bail;
+	}
+
+	res = configure_ipa_routing_block();
+	if (res) {
+		TETH_ERR("Configuration of IPA routing block Failed\n");
+		goto bail;
+	}
+
+	res = configure_ipa_filtering_block();
+	if (res) {
+		TETH_ERR("Configuration of IPA filtering block Failed\n");
+		goto bail;
+	}
+
+	teth_ctx->is_hw_bridge_complete = true;
+	teth_ctx->comp_hw_bridge_in_progress = false;
+bail:
+	mutex_unlock(&f_lock);
+	TETH_DBG_FUNC_EXIT();
+
+	return;
+}
+
+static void mac_addr_to_str(u8 mac_addr[ETH_ALEN],
+		     char *buff,
+		     uint buff_size)
+{
+	scnprintf(buff, buff_size, "%02x-%02x-%02x-%02x-%02x-%02x",
+		  mac_addr[0], mac_addr[1], mac_addr[2], mac_addr[3],
+		  mac_addr[4], mac_addr[5]);
+}
+
+static void check_to_complete_hw_bridge(struct sk_buff *skb,
+					u8 *my_mac_addr,
+					bool *my_mac_addr_known,
+					bool *peer_mac_addr_known)
+{
+	bool both_mac_addresses_known;
+	char mac_addr_str[20];
+
+	if ((teth_ctx->link_protocol == TETH_LINK_PROTOCOL_ETHERNET) &&
+	    (!(*my_mac_addr_known))) {
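+		/*
+		 * In an Ethernet frame the source MAC follows the 6-byte
+		 * destination MAC, hence the ETH_ALEN offset into the data.
+		 */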
+		memcpy(my_mac_addr, &skb->data[ETH_ALEN], ETH_ALEN);
+		mac_addr_to_str(my_mac_addr,
+				mac_addr_str,
+				sizeof(mac_addr_str)-1);
+		TETH_DBG("Extracted MAC addr: %s\n", mac_addr_str);
+		*my_mac_addr_known = true;
+	}
+
+	both_mac_addresses_known = *my_mac_addr_known && *peer_mac_addr_known;
+	if ((both_mac_addresses_known ||
+	    (teth_ctx->link_protocol == TETH_LINK_PROTOCOL_IP)) &&
+	    (!teth_ctx->comp_hw_bridge_in_progress) &&
+	    (teth_ctx->aggr_params_known)) {
+		INIT_WORK(&teth_ctx->comp_hw_bridge_work, complete_hw_bridge);
+		teth_ctx->comp_hw_bridge_in_progress = true;
+		schedule_work(&teth_ctx->comp_hw_bridge_work);
+	}
+}
+
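+/*
+ * Software data path for USB: packets that reach the apps processor are
+ * inspected to learn the host PC MAC address (until the HW bridge is
+ * complete) and are relayed to A2 via the a2_service driver.
+ */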
+static void usb_notify_cb(void *priv,
+			  enum ipa_dp_evt_type evt,
+			  unsigned long data)
+{
+	struct sk_buff *skb = (struct sk_buff *)data;
+	int res;
+
+	switch (evt) {
+	case IPA_RECEIVE:
+		if (!teth_ctx->is_hw_bridge_complete)
+			check_to_complete_hw_bridge(
+				skb,
+				teth_ctx->mac_addresses.host_pc_mac_addr,
+				&teth_ctx->mac_addresses.host_pc_mac_addr_known,
+				&teth_ctx->mac_addresses.device_mac_addr_known);
+
+		/* Send the packet to A2, using a2_service driver API */
+		res = a2_mux_write(A2_MUX_TETHERED_0, skb);
+		if (res) {
+			TETH_ERR("Packet send failure, dropping packet!\n");
+			dev_kfree_skb(skb);
+		}
+		break;
+
+	case IPA_WRITE_DONE:
+		dev_kfree_skb(skb);
+		break;
+
+	default:
+		TETH_ERR("Unsupported IPA event!\n");
+		WARN_ON(1);
+	}
+
+	return;
+}
+
+static void a2_notify_cb(void *user_data,
+			 enum a2_mux_event_type event,
+			 unsigned long data)
+{
+	struct sk_buff *skb = (struct sk_buff *)data;
+	int res;
+
+	switch (event) {
+	case A2_MUX_RECEIVE:
+		if (!teth_ctx->is_hw_bridge_complete)
+			check_to_complete_hw_bridge(
+				skb,
+				teth_ctx->mac_addresses.device_mac_addr,
+				&teth_ctx->mac_addresses.device_mac_addr_known,
+				&teth_ctx->
+				mac_addresses.host_pc_mac_addr_known);
+
+		/* Send the packet to USB */
+		res = ipa_tx_dp(IPA_CLIENT_USB_CONS, skb, NULL);
+		if (res) {
+			TETH_ERR("Packet send failure, dropping packet!\n");
+			dev_kfree_skb(skb);
+		}
+		break;
+
+	case A2_MUX_WRITE_DONE:
+		dev_kfree_skb(skb);
+		break;
+
+	default:
+		TETH_ERR("Unsupported A2 MUX event!\n");
+		WARN_ON(1);
+	}
+
+	return;
+}
+
+static void bridge_prod_notify_cb(void *notify_cb_data,
+				  enum ipa_rm_event event,
+				  unsigned long data)
+{
+	switch (event) {
+	case IPA_RM_RESOURCE_GRANTED:
+		complete(&teth_ctx->is_bridge_prod_up);
+		break;
+
+	case IPA_RM_RESOURCE_RELEASED:
+		complete(&teth_ctx->is_bridge_prod_down);
+		break;
+
+	default:
+		TETH_ERR("Unsupported notification!\n");
+		WARN_ON(1);
+		break;
+	}
+
+	return;
+}
+
+/**
+ * teth_bridge_init() - Initialize the Tethering bridge driver
+ * @usb_notify_cb_ptr:	Callback function which should be used
+ *			by the caller. Output parameter.
+ * @private_data_ptr:	Data for the callback function. Should
+ *			be used by the caller. Output parameter.
+ *
+ * Return codes: 0: success,
+ *		-EINVAL - Bad parameter
+ *		Other negative value - Failure
+ */
+int teth_bridge_init(ipa_notify_cb *usb_notify_cb_ptr, void **private_data_ptr)
+{
+	int res = 0;
+	struct ipa_rm_create_params bridge_prod_params;
+
+	TETH_DBG_FUNC_ENTRY();
+	if (usb_notify_cb_ptr == NULL) {
+		TETH_ERR("Bad parameter\n");
+		res = -EINVAL;
+		goto bail;
+	}
+
+	*usb_notify_cb_ptr = usb_notify_cb;
+	*private_data_ptr = NULL;
+
+	/* Build IPA Resource manager dependency graph */
+	bridge_prod_params.name = IPA_RM_RESOURCE_BRIDGE_PROD;
+	bridge_prod_params.reg_params.user_data = NULL;
+	bridge_prod_params.reg_params.notify_cb = bridge_prod_notify_cb;
+	res = ipa_rm_create_resource(&bridge_prod_params);
+	if (res) {
+		TETH_ERR("ipa_rm_create_resource() failed\n");
+		goto bail;
+	}
+
+	res = ipa_rm_add_dependency(IPA_RM_RESOURCE_BRIDGE_PROD,
+				    IPA_RM_RESOURCE_USB_CONS);
+	if (res) {
+		TETH_ERR("ipa_rm_add_dependency() failed\n");
+		goto bail;
+	}
+
+	res = ipa_rm_add_dependency(IPA_RM_RESOURCE_BRIDGE_PROD,
+				    IPA_RM_RESOURCE_A2_CONS);
+	if (res) {
+		TETH_ERR("ipa_rm_add_dependency() failed\n");
+		goto fail_add_dependency_1;
+	}
+
+	res = ipa_rm_add_dependency(IPA_RM_RESOURCE_USB_PROD,
+				    IPA_RM_RESOURCE_A2_CONS);
+	if (res) {
+		TETH_ERR("ipa_rm_add_dependency() failed\n");
+		goto fail_add_dependency_2;
+	}
+
+	res = ipa_rm_add_dependency(IPA_RM_RESOURCE_A2_PROD,
+				    IPA_RM_RESOURCE_USB_CONS);
+	if (res) {
+		TETH_ERR("ipa_rm_add_dependency() failed\n");
+		goto fail_add_dependency_3;
+	}
+
+	init_completion(&teth_ctx->is_bridge_prod_up);
+	init_completion(&teth_ctx->is_bridge_prod_down);
+
+	/* The default link protocol is Ethernet */
+	teth_ctx->link_protocol = TETH_LINK_PROTOCOL_ETHERNET;
+	goto bail;
+
+fail_add_dependency_3:
+	ipa_rm_delete_dependency(IPA_RM_RESOURCE_USB_PROD,
+				 IPA_RM_RESOURCE_A2_CONS);
+fail_add_dependency_2:
+	ipa_rm_delete_dependency(IPA_RM_RESOURCE_BRIDGE_PROD,
+				 IPA_RM_RESOURCE_A2_CONS);
+fail_add_dependency_1:
+	ipa_rm_delete_dependency(IPA_RM_RESOURCE_BRIDGE_PROD,
+				 IPA_RM_RESOURCE_USB_CONS);
+bail:
+	TETH_DBG_FUNC_EXIT();
+	return res;
+}
+EXPORT_SYMBOL(teth_bridge_init);
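+
+/*
+ * Expected call order, as implied by the API in this file: the USB
+ * function driver calls teth_bridge_init() to obtain the IPA notify
+ * callback, sets up its IPA pipes, then calls teth_bridge_connect()
+ * with the pipe handles; teth_bridge_disconnect() is called when the
+ * tethered link goes down.
+ */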
+
+/**
+ * teth_bridge_disconnect() - Disconnect tethering bridge module
+ *
+ * Return codes:	0: success
+ *		-EPERM: Operation not permitted as the bridge is already
+ *		disconnected
+ */
+int teth_bridge_disconnect(void)
+{
+	int res = -EPERM;
+
+	TETH_DBG_FUNC_ENTRY();
+	if (!teth_ctx->is_connected) {
+		TETH_ERR(
+		"Trying to disconnect an already disconnected bridge\n");
+		goto bail;
+	}
+
+	teth_ctx->is_connected = false;
+
+	res = ipa_rm_release_resource(IPA_RM_RESOURCE_BRIDGE_PROD);
+	if (res == -EINPROGRESS)
+		wait_for_completion(&teth_ctx->is_bridge_prod_down);
+
+bail:
+	TETH_DBG_FUNC_EXIT();
+	return res;
+}
+EXPORT_SYMBOL(teth_bridge_disconnect);
+
+/**
+ * teth_bridge_connect() - Connect bridge for a tethered Rmnet / MBIM call
+ * @connect_params:	Connection info
+ *
+ * Return codes: 0: success
+ *		-EINVAL: invalid parameters
+ *		-EPERM: Operation not permitted as the bridge is already
+ *		connected
+ */
+int teth_bridge_connect(struct teth_bridge_connect_params *connect_params)
+{
+	int res;
+	struct ipa_ep_cfg ipa_ep_cfg;
+
+	TETH_DBG_FUNC_ENTRY();
+	if (teth_ctx->is_connected) {
+		TETH_ERR("Trying to connect an already connected bridge !\n");
+		return -EPERM;
+	}
+	if (connect_params == NULL ||
+	    connect_params->ipa_usb_pipe_hdl <= 0 ||
+	    connect_params->usb_ipa_pipe_hdl <= 0 ||
+	    connect_params->tethering_mode >= TETH_TETHERING_MODE_MAX ||
+	    connect_params->tethering_mode < 0)
+		return -EINVAL;
+
+	teth_ctx->ipa_usb_pipe_hdl = connect_params->ipa_usb_pipe_hdl;
+	teth_ctx->usb_ipa_pipe_hdl = connect_params->usb_ipa_pipe_hdl;
+	teth_ctx->tethering_mode = connect_params->tethering_mode;
+
+	res = ipa_rm_request_resource(IPA_RM_RESOURCE_BRIDGE_PROD);
+	if (res < 0) {
+		if (res == -EINPROGRESS)
+			wait_for_completion(&teth_ctx->is_bridge_prod_up);
+		else
+			goto bail;
+	}
+
+	res = a2_mux_open_channel(A2_MUX_TETHERED_0,
+				  NULL,
+				  a2_notify_cb);
+	if (res) {
+		TETH_ERR("a2_mux_open_channel() failed\n");
+		goto bail;
+	}
+
+	res = a2_mux_get_tethered_client_handles(A2_MUX_TETHERED_0,
+						 &teth_ctx->ipa_a2_pipe_hdl,
+						 &teth_ctx->a2_ipa_pipe_hdl);
+	if (res) {
+		TETH_ERR(
+		"a2_mux_get_tethered_client_handles() failed, res = %d\n", res);
+		goto bail;
+	}
+
+	/* Reset the various endpoints configuration */
+	memset(&ipa_ep_cfg, 0, sizeof(ipa_ep_cfg));
+	ipa_cfg_ep(teth_ctx->ipa_usb_pipe_hdl, &ipa_ep_cfg);
+	ipa_cfg_ep(teth_ctx->usb_ipa_pipe_hdl, &ipa_ep_cfg);
+	ipa_cfg_ep(teth_ctx->ipa_a2_pipe_hdl, &ipa_ep_cfg);
+	ipa_cfg_ep(teth_ctx->a2_ipa_pipe_hdl, &ipa_ep_cfg);
+
+	teth_ctx->is_connected = true;
+
+	if (teth_ctx->tethering_mode == TETH_TETHERING_MODE_MBIM)
+		teth_ctx->link_protocol = TETH_LINK_PROTOCOL_IP;
+	TETH_DBG_FUNC_EXIT();
+bail:
+	if (res)
+		ipa_rm_release_resource(IPA_RM_RESOURCE_BRIDGE_PROD);
+	return res;
+}
+EXPORT_SYMBOL(teth_bridge_connect);
+
+static void set_aggr_default_params(struct teth_aggr_params_link *params)
+{
+	if (params->max_datagrams == 0)
+		params->max_datagrams = 16;
+	if (params->max_transfer_size_byte == 0)
+		params->max_transfer_size_byte = 16*1024;
+}
+
+static void teth_set_bridge_mode(enum teth_link_protocol_type link_protocol)
+{
+	teth_ctx->link_protocol = link_protocol;
+	teth_ctx->is_hw_bridge_complete = false;
+	memset(&teth_ctx->mac_addresses, 0, sizeof(teth_ctx->mac_addresses));
+}
+
+static long teth_bridge_ioctl(struct file *filp,
+			      unsigned int cmd,
+			      unsigned long arg)
+{
+	int res = 0;
+
+	TETH_DBG("cmd=%x nr=%d\n", cmd, _IOC_NR(cmd));
+
+	if ((_IOC_TYPE(cmd) != TETH_BRIDGE_IOC_MAGIC) ||
+	    (_IOC_NR(cmd) >= TETH_BRIDGE_IOCTL_MAX)) {
+		TETH_ERR("Invalid ioctl\n");
+		return -ENOIOCTLCMD;
+	}
+
+	switch (cmd) {
+	case TETH_BRIDGE_IOC_SET_BRIDGE_MODE:
+		TETH_DBG("TETH_BRIDGE_IOC_SET_BRIDGE_MODE ioctl called\n");
+		if (teth_ctx->link_protocol != arg)
+			teth_set_bridge_mode(arg);
+		break;
+
+	case TETH_BRIDGE_IOC_SET_AGGR_PARAMS:
+		TETH_DBG("TETH_BRIDGE_IOC_SET_AGGR_PARAMS ioctl called\n");
+		res = copy_from_user(&teth_ctx->aggr_params,
+				   (struct teth_aggr_params *)arg,
+				   sizeof(struct teth_aggr_params));
+		if (res) {
+			TETH_ERR("Error, res = %d\n", res);
+			res = -EFAULT;
+			break;
+		}
+		set_aggr_default_params(&teth_ctx->aggr_params.dl);
+		set_aggr_default_params(&teth_ctx->aggr_params.ul);
+		teth_ctx->aggr_params_known = true;
+		break;
+
+	case TETH_BRIDGE_IOC_GET_AGGR_PARAMS:
+		TETH_DBG("TETH_BRIDGE_IOC_GET_AGGR_PARAMS ioctl called\n");
+		if (copy_to_user((u8 *)arg, (u8 *)&teth_ctx->aggr_params,
+				   sizeof(struct teth_aggr_params))) {
+			res = -EFAULT;
+			break;
+		}
+		break;
+
+	case TETH_BRIDGE_IOC_GET_AGGR_CAPABILITIES:
+	{
+		u16 sz;
+		u16 pyld_sz;
+		struct teth_aggr_capabilities caps;
+
+		TETH_DBG("GET_AGGR_CAPABILITIES ioctl called\n");
+		sz = sizeof(struct teth_aggr_capabilities);
+		if (copy_from_user(&caps,
+				   (struct teth_aggr_capabilities *)arg,
+				   sz)) {
+			res = -EFAULT;
+			break;
+		}
+
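+		/*
+		 * If the caller allocated fewer protocol entries than we
+		 * have, report the required count and return -EAGAIN so it
+		 * can retry with a larger buffer.
+		 */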
+		if (caps.num_protocols < teth_ctx->aggr_caps->num_protocols) {
+			caps.num_protocols = teth_ctx->aggr_caps->num_protocols;
+			if (copy_to_user((struct teth_aggr_capabilities *)arg,
+					 &caps,
+					 sz)) {
+				res = -EFAULT;
+				break;
+			}
+			TETH_DBG("Not enough space allocated.\n");
+			res = -EAGAIN;
+			break;
+		}
+
+		pyld_sz = sz + caps.num_protocols *
+			sizeof(struct teth_aggr_params_link);
+
+		if (copy_to_user((u8 *)arg,
+				 (u8 *)(teth_ctx->aggr_caps),
+				 pyld_sz)) {
+			res = -EFAULT;
+			break;
+		}
+	}
+	break;
+	}
+
+	return res;
+}
+
+static void set_aggr_capabilities(void)
+{
+	u16 NUM_PROTOCOLS = 2;
+
+	teth_ctx->aggr_caps = kzalloc(sizeof(struct teth_aggr_capabilities) +
+				      NUM_PROTOCOLS *
+				      sizeof(struct teth_aggr_params_link),
+				      GFP_KERNEL);
+	if (teth_ctx->aggr_caps == NULL) {
+		TETH_ERR("Memory alloc failed for aggregation capabilities.\n");
+		return;
+	}
+
+	teth_ctx->aggr_caps->num_protocols = NUM_PROTOCOLS;
+
+	teth_ctx->aggr_caps->prot_caps[0].aggr_prot = TETH_AGGR_PROTOCOL_MBIM;
+	teth_ctx->aggr_caps->prot_caps[0].max_datagrams = 16;
+	teth_ctx->aggr_caps->prot_caps[0].max_transfer_size_byte = 16*1024;
+
+	teth_ctx->aggr_caps->prot_caps[1].aggr_prot = TETH_AGGR_PROTOCOL_TLP;
+	teth_ctx->aggr_caps->prot_caps[1].max_datagrams = 16;
+	teth_ctx->aggr_caps->prot_caps[1].max_transfer_size_byte = 16*1024;
+}
+
+void teth_bridge_get_client_handles(u32 *producer_handle,
+		u32 *consumer_handle)
+{
+	if (producer_handle == NULL || consumer_handle == NULL)
+		return;
+
+	*producer_handle = teth_ctx->usb_ipa_pipe_hdl;
+	*consumer_handle = teth_ctx->ipa_usb_pipe_hdl;
+}
+
+#ifdef CONFIG_DEBUG_FS
+static struct dentry *dent;
+static struct dentry *dfile_link_protocol;
+static struct dentry *dfile_get_aggr_params;
+static struct dentry *dfile_set_aggr_protocol;
+
+static ssize_t teth_debugfs_read_link_protocol(struct file *file,
+					       char __user *ubuf,
+					       size_t count,
+					       loff_t *ppos)
+{
+	int nbytes;
+
+	nbytes = scnprintf(dbg_buff, TETH_MAX_MSG_LEN, "Link protocol = %s\n",
+			   (teth_ctx->link_protocol ==
+				TETH_LINK_PROTOCOL_ETHERNET) ?
+			   "ETHERNET" :
+			   "IP");
+
+	return simple_read_from_buffer(ubuf, count, ppos, dbg_buff, nbytes);
+}
+
+static ssize_t teth_debugfs_write_link_protocol(struct file *file,
+					const char __user *ubuf,
+					size_t count,
+					loff_t *ppos)
+{
+	unsigned long missing;
+	enum teth_link_protocol_type link_protocol;
+
+	if (sizeof(dbg_buff) < count + 1)
+		return -EFAULT;
+
+	missing = copy_from_user(dbg_buff, ubuf, count);
+	if (missing)
+		return -EFAULT;
+
+	if (count > 0)
+		dbg_buff[count-1] = '\0';
+
+	if (strcmp(dbg_buff, "ETHERNET") == 0) {
+		link_protocol = TETH_LINK_PROTOCOL_ETHERNET;
+	} else if (strcmp(dbg_buff, "IP") == 0) {
+		link_protocol = TETH_LINK_PROTOCOL_IP;
+	} else {
+		TETH_ERR("Bad link protocol, got %s,\n"
+			 "Use <ETHERNET> or <IP>.\n", dbg_buff);
+		return count;
+	}
+
+	teth_set_bridge_mode(link_protocol);
+
+	return count;
+}
+
+static ssize_t teth_debugfs_read_aggr_params(struct file *file,
+					     char __user *ubuf,
+					     size_t count,
+					     loff_t *ppos)
+{
+	int nbytes = 0;
+	char aggr_str[20];
+
+	aggr_prot_to_str(teth_ctx->aggr_params.ul.aggr_prot,
+			 aggr_str,
+			 sizeof(aggr_str)-1);
+	nbytes += scnprintf(&dbg_buff[nbytes], TETH_MAX_MSG_LEN - nbytes,
+			   "Aggregation parameters for uplink:\n");
+	nbytes += scnprintf(&dbg_buff[nbytes], TETH_MAX_MSG_LEN - nbytes,
+			    "  Aggregation protocol: %s\n",
+			    aggr_str);
+	nbytes += scnprintf(&dbg_buff[nbytes], TETH_MAX_MSG_LEN - nbytes,
+			    "  Max transfer size [byte]: %d\n",
+			    teth_ctx->aggr_params.ul.max_transfer_size_byte);
+	nbytes += scnprintf(&dbg_buff[nbytes], TETH_MAX_MSG_LEN - nbytes,
+			    "  Max datagrams: %d\n",
+			    teth_ctx->aggr_params.ul.max_datagrams);
+
+	aggr_prot_to_str(teth_ctx->aggr_params.dl.aggr_prot,
+			 aggr_str,
+			 sizeof(aggr_str)-1);
+	nbytes += scnprintf(&dbg_buff[nbytes], TETH_MAX_MSG_LEN - nbytes,
+			   "Aggregation parameters for downlink:\n");
+	nbytes += scnprintf(&dbg_buff[nbytes], TETH_MAX_MSG_LEN - nbytes,
+			    "  Aggregation protocol: %s\n",
+			    aggr_str);
+	nbytes += scnprintf(&dbg_buff[nbytes], TETH_MAX_MSG_LEN - nbytes,
+			    "  Max transfer size [byte]: %d\n",
+			    teth_ctx->aggr_params.dl.max_transfer_size_byte);
+	nbytes += scnprintf(&dbg_buff[nbytes], TETH_MAX_MSG_LEN - nbytes,
+			    "  Max datagrams: %d\n",
+			    teth_ctx->aggr_params.dl.max_datagrams);
+
+	return simple_read_from_buffer(ubuf, count, ppos, dbg_buff, nbytes);
+}
+
+static ssize_t teth_debugfs_set_aggr_protocol(struct file *file,
+				      const char __user *ubuf,
+				      size_t count, loff_t *ppos)
+{
+	unsigned long missing;
+	enum teth_aggr_protocol_type aggr_prot;
+	int res;
+
+	if (sizeof(dbg_buff) < count + 1)
+		return -EFAULT;
+
+	missing = copy_from_user(dbg_buff, ubuf, count);
+	if (missing)
+		return -EFAULT;
+
+	if (count > 0)
+		dbg_buff[count-1] = '\0';
+
+	set_aggr_default_params(&teth_ctx->aggr_params.dl);
+	set_aggr_default_params(&teth_ctx->aggr_params.ul);
+
+	if (strcmp(dbg_buff, "NONE") == 0) {
+		aggr_prot = TETH_AGGR_PROTOCOL_NONE;
+	} else if (strcmp(dbg_buff, "MBIM") == 0) {
+		aggr_prot = TETH_AGGR_PROTOCOL_MBIM;
+	} else if (strcmp(dbg_buff, "TLP") == 0) {
+		aggr_prot = TETH_AGGR_PROTOCOL_TLP;
+	} else {
+		TETH_ERR("Bad aggregation protocol, got %s,\n"
+			 "Use <NONE>, <MBIM> or <TLP>.\n", dbg_buff);
+		return count;
+	}
+
+	teth_ctx->aggr_params.dl.aggr_prot = aggr_prot;
+	teth_ctx->aggr_params.ul.aggr_prot = aggr_prot;
+	teth_ctx->aggr_params_known = true;
+
+	res = teth_set_aggregation();
+	if (res)
+		TETH_ERR("Failed setting aggregation params\n");
+
+	return count;
+}
+
+const struct file_operations teth_link_protocol_ops = {
+	.read = teth_debugfs_read_link_protocol,
+	.write = teth_debugfs_write_link_protocol,
+};
+
+const struct file_operations teth_get_aggr_params_ops = {
+	.read = teth_debugfs_read_aggr_params,
+};
+
+const struct file_operations teth_set_aggr_protocol_ops = {
+	.write = teth_debugfs_set_aggr_protocol,
+};
+
+void teth_debugfs_init(void)
+{
+	const mode_t read_only_mode = S_IRUSR | S_IRGRP | S_IROTH;
+	const mode_t read_write_mode = S_IRUSR | S_IRGRP | S_IROTH |
+			S_IWUSR | S_IWGRP | S_IWOTH;
+
+	dent = debugfs_create_dir("ipa_teth", 0);
+	if (IS_ERR(dent)) {
+		IPAERR("fail to create folder ipa_teth debug_fs.\n");
+		return;
+	}
+
+	dfile_link_protocol =
+		debugfs_create_file("link_protocol", read_write_mode, dent, 0,
+				    &teth_link_protocol_ops);
+	if (!dfile_link_protocol || IS_ERR(dfile_link_protocol)) {
+		IPAERR("fail to create file link_protocol\n");
+		goto fail;
+	}
+
+	dfile_get_aggr_params =
+		debugfs_create_file("get_aggr_params", read_only_mode, dent, 0,
+				    &teth_get_aggr_params_ops);
+	if (!dfile_get_aggr_params || IS_ERR(dfile_get_aggr_params)) {
+		IPAERR("fail to create file get_aggr_params\n");
+		goto fail;
+	}
+
+	dfile_set_aggr_protocol =
+		debugfs_create_file("set_aggr_protocol", read_write_mode, dent,
+				    0, &teth_set_aggr_protocol_ops);
+	if (!dfile_set_aggr_protocol || IS_ERR(dfile_set_aggr_protocol)) {
+		IPAERR("fail to create file set_aggr_protocol\n");
+		goto fail;
+	}
+
+	return;
+fail:
+	debugfs_remove_recursive(dent);
+}
+#else
+void teth_debugfs_init(void) {}
+#endif /* CONFIG_DEBUG_FS */
+
+
+static const struct file_operations teth_bridge_drv_fops = {
+	.owner = THIS_MODULE,
+	.unlocked_ioctl = teth_bridge_ioctl,
+};
+
+/**
+ * teth_bridge_driver_init() - Initialize tethering bridge driver
+ */
+int teth_bridge_driver_init(void)
+{
+	int res;
+
+	TETH_DBG("Tethering bridge driver init\n");
+	teth_ctx = kzalloc(sizeof(*teth_ctx), GFP_KERNEL);
+	if (!teth_ctx) {
+		TETH_ERR("kzalloc err.\n");
+		return -ENOMEM;
+	}
+
+	set_aggr_capabilities();
+
+	teth_ctx->class = class_create(THIS_MODULE, TETH_BRIDGE_DRV_NAME);
+
+	res = alloc_chrdev_region(&teth_ctx->dev_num, 0, 1,
+				  TETH_BRIDGE_DRV_NAME);
+	if (res) {
+		TETH_ERR("alloc_chrdev_region err.\n");
+		res = -ENODEV;
+		goto fail_alloc_chrdev_region;
+	}
+
+	teth_ctx->dev = device_create(teth_ctx->class, NULL, teth_ctx->dev_num,
+				      teth_ctx, TETH_BRIDGE_DRV_NAME);
+	if (IS_ERR(teth_ctx->dev)) {
+		TETH_ERR("device_create err.\n");
+		res = -ENODEV;
+		goto fail_device_create;
+	}
+
+	cdev_init(&teth_ctx->cdev, &teth_bridge_drv_fops);
+	teth_ctx->cdev.owner = THIS_MODULE;
+	teth_ctx->cdev.ops = &teth_bridge_drv_fops;
+
+	res = cdev_add(&teth_ctx->cdev, teth_ctx->dev_num, 1);
+	if (res) {
+		TETH_ERR("cdev_add err=%d\n", -res);
+		res = -ENODEV;
+		goto fail_cdev_add;
+	}
+
+	teth_ctx->comp_hw_bridge_in_progress = false;
+
+	teth_debugfs_init();
+	TETH_DBG("Tethering bridge driver init OK\n");
+
+	return 0;
+fail_cdev_add:
+	device_destroy(teth_ctx->class, teth_ctx->dev_num);
+fail_device_create:
+	unregister_chrdev_region(teth_ctx->dev_num, 1);
+fail_alloc_chrdev_region:
+	kfree(teth_ctx->aggr_caps);
+	kfree(teth_ctx);
+	teth_ctx = NULL;
+
+	return res;
+}
+EXPORT_SYMBOL(teth_bridge_driver_init);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Tethering bridge driver");
diff --git a/drivers/platform/msm/sps/bam.c b/drivers/platform/msm/sps/bam.c
index 1064086..0f81285 100644
--- a/drivers/platform/msm/sps/bam.c
+++ b/drivers/platform/msm/sps/bam.c
@@ -992,7 +992,11 @@
 {
 	SPS_DBG2("sps:%s:bam=0x%x(va).pipe=%d.", __func__, (u32) base, pipe);
 
-	bam_write_reg_field(base, P_CTRL(pipe), P_EN, 1);
+	if (bam_read_reg_field(base, P_CTRL(pipe), P_EN))
+		SPS_DBG2("sps:bam=0x%x(va).pipe=%d is already enabled.\n",
+			(u32) base, pipe);
+	else
+		bam_write_reg_field(base, P_CTRL(pipe), P_EN, 1);
 }
 
 /**
diff --git a/drivers/platform/msm/ssbi.c b/drivers/platform/msm/ssbi.c
index a08eb48..e0bbdd1 100644
--- a/drivers/platform/msm/ssbi.c
+++ b/drivers/platform/msm/ssbi.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2009-2012, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2009-2013, The Linux Foundation. All rights reserved.
  * Copyright (c) 2010, Google Inc.
  *
  * Original authors: Code Aurora Forum
@@ -362,7 +362,7 @@
 
 	ssbi->base = ioremap(mem_res->start, resource_size(mem_res));
 	if (!ssbi->base) {
-		pr_err("ioremap of 0x%p failed\n", (void *)mem_res->start);
+		pr_err("ioremap failed: %pr\n", mem_res);
 		ret = -EINVAL;
 		goto err_ioremap;
 	}
diff --git a/drivers/platform/msm/ssm.c b/drivers/platform/msm/ssm.c
new file mode 100644
index 0000000..c57bb91
--- /dev/null
+++ b/drivers/platform/msm/ssm.c
@@ -0,0 +1,931 @@
+/* Copyright (c) 2013, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+/*
+ * Qualcomm Secure Service Module(SSM) driver
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/fs.h>
+#include <linux/of.h>
+#include <linux/cdev.h>
+#include <linux/uaccess.h>
+#include <linux/mutex.h>
+#include <linux/ion.h>
+#include <linux/types.h>
+#include <linux/firmware.h>
+#include <linux/elf.h>
+#include <linux/platform_device.h>
+#include <linux/msm_ion.h>
+#include <linux/platform_data/qcom_ssm.h>
+#include <mach/scm.h>
+#include <mach/msm_smd.h>
+
+#include "ssm.h"
+
+/* Macros */
+#define SSM_DEV_NAME			"ssm"
+#define MPSS_SUBSYS			0
+#define SSM_INFO_CMD_ID			1
+#define QSEOS_CHECK_VERSION_CMD		0x00001803
+
+#define MAX_APP_NAME_SIZE		32
+#define SSM_MSG_LEN			(104  + 4) /* bytes + pad */
+#define SSM_MSG_FIELD_LEN		11
+#define SSM_HEADER_LEN			(SSM_MSG_FIELD_LEN * 4)
+#define ATOM_MSG_LEN			(SSM_HEADER_LEN + SSM_MSG_LEN)
+#define FIRMWARE_NAME			"ssmapp"
+#define TZAPP_NAME			"SsmApp"
+#define CHANNEL_NAME			"SSM_RTR"
+
+#define ALIGN_BUFFER(size)		(((size) + 4095) & ~4095)
+
+/* SSM driver structure.*/
+struct ssm_driver {
+	int32_t app_id;
+	int32_t app_status;
+	int32_t update_status;
+	int32_t atom_replay;
+	int32_t mtoa_replay;
+	uint32_t buff_len;
+	unsigned char *channel_name;
+	unsigned char *smd_buffer;
+	struct ion_client *ssm_ion_client;
+	struct ion_handle *ssm_ion_handle;
+	struct tzapp_get_mode_info_rsp *resp;
+	struct device *dev;
+	smd_channel_t *ch;
+	ion_phys_addr_t buff_phys;
+	ion_virt_addr_t buff_virt;
+	dev_t ssm_device_no;
+	struct work_struct ipc_work;
+	struct mutex mutex;
+	bool key_status;
+	bool ready;
+};
+
+static struct ssm_driver *ssm_drv;
+
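+/* Parse one fixed-width (SSM_MSG_FIELD_LEN) space-padded decimal field. */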
+static int getint(char *buff, unsigned long *res)
+{
+	char value[SSM_MSG_FIELD_LEN];
+
+	memcpy(value, buff, SSM_MSG_FIELD_LEN);
+	value[SSM_MSG_FIELD_LEN - 1] = '\0';
+
+	return kstrtoul(skip_spaces(value), 10, res);
+}
+
+/*
+ * Send packet to modem over SMD channel.
+ */
+static int update_modem(enum ssm_ipc_req ipc_req, struct ssm_driver *ssm,
+		int length, char *data)
+{
+	unsigned int packet_len = SSM_HEADER_LEN + length + 1;
+	int rc = 0;
+
+	ssm->atom_replay += 1;
+	snprintf(ssm->smd_buffer, SSM_HEADER_LEN + 1, "%10u|%10u|%10u|%10u|"
+			, packet_len, ssm->atom_replay, ipc_req, length);
+	memcpy(ssm->smd_buffer + SSM_HEADER_LEN, data, length);
+
+	ssm->smd_buffer[packet_len - 1] = '|';
+
+	if (smd_write_avail(ssm->ch) < packet_len) {
+		dev_err(ssm->dev, "Not enough space, dropping request\n");
+		return -ENOSPC;
+	}
+
+	rc = smd_write(ssm->ch, ssm->smd_buffer, packet_len);
+	if (rc < packet_len) {
+		dev_err(ssm->dev, "smd_write failed for %d\n", ipc_req);
+		rc = -EIO;
+	}
+
+	return rc;
+}
+
+/*
+ * Header Format
+ * Each header field is 10 bytes of ASCII.
+ * Fields are separated by a '|' delimiter.
+ * |<-10 bytes->|<-10 bytes->|<-10 bytes->|<-10 bytes->|<-10 bytes->|
+ * |-----------------------------------------------------------------
+ * | length     | replay no. | request    | msg_len    | message    |
+ * |-----------------------------------------------------------------
+ *
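+ * For example, an apps-to-modem SSM_ATOM_KEY_STATUS (0xAAAB = 43691)
+ * packet with replay number 1 and the one-byte message "1" is encoded
+ * as the 46-byte string:
+ * "        46|         1|     43691|         1|1|"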
+ */
+static int  decode_header(char *buffer, int length,
+		struct ssm_common_msg *pkt)
+{
+	int rc;
+
+	rc =  getint(buffer, &pkt->pktlen);
+	if (rc < 0)
+		return -EINVAL;
+
+	buffer += SSM_MSG_FIELD_LEN;
+	rc =  getint(buffer, &pkt->replaynum);
+	if (rc < 0)
+		return -EINVAL;
+
+	buffer += SSM_MSG_FIELD_LEN;
+	rc =  getint(buffer, (unsigned long *)&pkt->ipc_req);
+	if (rc < 0)
+		return -EINVAL;
+
+	buffer += SSM_MSG_FIELD_LEN;
+	rc =  getint(buffer, &pkt->msg_len);
+	if ((rc < 0) || (pkt->msg_len > SSM_MSG_LEN))
+		return -EINVAL;
+
+	pkt->msg = buffer + SSM_MSG_FIELD_LEN;
+
+	dev_dbg(ssm_drv->dev, "len %lu rep %lu req %d msg_len %lu\n",
+			pkt->pktlen, pkt->replaynum, pkt->ipc_req,
+			pkt->msg_len);
+	return 0;
+}
+
+/*
+ * Decode address for storing the decryption key.
+ * Only for Key Exchange
+ * Message Format
+ * |Length@Address|
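+ * For example, "32@123456" describes a 32-byte key to be stored at
+ * (decimal) physical address 123456.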
+ */
+static int decode_message(char *msg, unsigned int len, unsigned long *length,
+		unsigned long *address)
+{
+	int i = 0, rc = 0;
+	char *buff;
+
+	buff = kzalloc(len, GFP_KERNEL);
+	if (!buff)
+		return -ENOMEM;
+	while (i < len) {
+		if (msg[i] == '@')
+			break;
+		i++;
+	}
+	if ((i < len) && (msg[i] == '@')) {
+		memcpy(buff, msg, i);
+		buff[i] = '\0';
+		rc = kstrtoul(skip_spaces(buff), 10, length);
+		if (rc || *length == 0) {
+			rc = -EINVAL;
+			goto exit;
+		}
+		memcpy(buff, &msg[i + 1], len - (i + 1));
+		buff[len - i] = '\0';
+		rc = kstrtoul(skip_spaces(buff), 10, address);
+	} else
+		rc = -EINVAL;
+
+exit:
+	kfree(buff);
+	return rc;
+}
+
+static void process_message(int cmd, char *msg, int len,
+		struct ssm_driver *ssm)
+{
+	int rc;
+	unsigned long key_len = 0, key_add = 0, val;
+	struct ssm_keyexchg_req req;
+
+	switch (cmd) {
+	case SSM_MTOA_KEY_EXCHANGE:
+		if (len < 3) {
+			dev_err(ssm->dev, "Invalid message\n");
+			break;
+		}
+
+		if (ssm->key_status) {
+			dev_err(ssm->dev, "Key exchange already done\n");
+			break;
+		}
+
+		rc = decode_message(msg, len, &key_len, &key_add);
+		if (rc) {
+			rc = update_modem(SSM_ATOM_KEY_STATUS, ssm,
+					1, "1");
+			break;
+		}
+
+		/*
+		 * We are doing key-exchange part here as it is very
+		 * specific for this case. For all other tz
+		 * communication we have generic function.
+		 */
+		req.ssid = MPSS_SUBSYS;
+		req.address = (void *)key_add;
+		req.length = key_len;
+		req.status = (uint32_t *)ssm->buff_phys;
+
+		*(unsigned int *)ssm->buff_virt = -1;
+		rc = scm_call(KEY_EXCHANGE, 0x1, &req,
+				sizeof(struct ssm_keyexchg_req), NULL, 0);
+		if (rc) {
+			dev_err(ssm->dev, "Call for key exchg failed %d", rc);
+			rc = update_modem(SSM_ATOM_KEY_STATUS, ssm,
+								1, "1");
+		} else {
+			/* Success encode packet and update modem */
+			rc = update_modem(SSM_ATOM_KEY_STATUS, ssm,
+					1, "0");
+			ssm->key_status = true;
+		}
+		break;
+
+	case SSM_MTOA_MODE_UPDATE_STATUS:
+		msg[len] = '\0';
+		rc = kstrtoul(skip_spaces(msg), 10, &val);
+		if (val) {
+			dev_err(ssm->dev, "Modem mode update failed\n");
+			ssm->update_status = FAILED;
+		} else
+			ssm->update_status = SUCCESS;
+
+		dev_dbg(ssm->dev, "Modem mode update status %lu\n", val);
+		break;
+
+	default:
+		dev_dbg(ssm->dev, "Invalid message\n");
+		break;
+	}
+}
+
+/*
+ * Work function to handle and process packets coming from modem.
+ */
+static void ssm_app_modem_work_fn(struct work_struct *work)
+{
+	int sz, rc;
+	struct ssm_common_msg pkt;
+	struct ssm_driver *ssm;
+
+	ssm = container_of(work, struct ssm_driver, ipc_work);
+
+	mutex_lock(&ssm->mutex);
+	sz = smd_cur_packet_size(ssm->ch);
+	if ((sz <= 0) || (sz > ATOM_MSG_LEN)) {
+		dev_dbg(ssm_drv->dev, "Garbled message size\n");
+		goto unlock;
+	}
+
+	if (smd_read_avail(ssm->ch) < sz) {
+		dev_err(ssm_drv->dev, "SMD error data in channel\n");
+		goto unlock;
+	}
+
+	if (sz < SSM_HEADER_LEN) {
+		dev_err(ssm_drv->dev, "Invalid packet\n");
+		goto unlock;
+	}
+
+	if (smd_read(ssm->ch, ssm->smd_buffer, sz) != sz) {
+		dev_err(ssm_drv->dev, "Incomplete data\n");
+		goto unlock;
+	}
+
+	rc = decode_header(ssm->smd_buffer, sz, &pkt);
+	if (rc < 0) {
+		dev_err(ssm_drv->dev, "Corrupted header\n");
+		goto unlock;
+	}
+
+	/* Check validity of message */
+	if (ssm->mtoa_replay >= (int)pkt.replaynum) {
+		dev_err(ssm_drv->dev, "Replay attack...\n");
+		goto unlock;
+	}
+
+	if (pkt.msg[pkt.msg_len] != '|') {
+		dev_err(ssm_drv->dev, "Garbled message\n");
+		goto unlock;
+	}
+
+	ssm->mtoa_replay = pkt.replaynum;
+	process_message(pkt.ipc_req, pkt.msg, pkt.msg_len, ssm);
+
+unlock:
+	mutex_unlock(&ssm->mutex);
+}
+
+/*
+ * MODEM-APPS smd channel callback function.
+ */
+static void modem_request(void *ctxt, unsigned event)
+{
+	struct ssm_driver *ssm;
+
+	ssm = (struct ssm_driver *)ctxt;
+
+	switch (event) {
+	case SMD_EVENT_OPEN:
+	case SMD_EVENT_CLOSE:
+		dev_info(ssm->dev, "Port %s\n",
+			(event == SMD_EVENT_OPEN) ? "opened" : "closed");
+		break;
+	case SMD_EVENT_DATA:
+		if (smd_read_avail(ssm->ch) > 0)
+			schedule_work(&ssm->ipc_work);
+		break;
+	}
+}
+
+/*
+ * Communication interface between ssm driver and TZ.
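+ * The shared ION buffer holds the marshalled request at offset 0 and
+ * the response immediately after it, at offset tz_req_len.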
+ */
+static int tz_scm_call(struct ssm_driver *ssm, void *tz_req, int tz_req_len,
+			void **tz_resp, int tz_resp_len)
+{
+	int rc;
+	struct common_req req;
+	struct common_resp resp;
+
+	memcpy((void *)ssm->buff_virt, tz_req, tz_req_len);
+
+	req.cmd_id = CLIENT_SEND_DATA_COMMAND;
+	req.app_id = ssm->app_id;
+	req.req_ptr = (void *)ssm->buff_phys;
+	req.req_len = tz_req_len;
+	req.resp_ptr = (void *)(ssm->buff_phys + tz_req_len);
+	req.resp_len = tz_resp_len;
+
+	rc = scm_call(SCM_SVC_TZSCHEDULER, 1, (const void *) &req,
+			sizeof(req), (void *)&resp, sizeof(resp));
+	if (rc) {
+		dev_err(ssm->dev, "SCM call failed for data command\n");
+		return rc;
+	}
+
+	if (resp.result != RESULT_SUCCESS) {
+		dev_err(ssm->dev, "Data command response failure %d\n",
+				resp.result);
+		return -EINVAL;
+	}
+
+	*tz_resp = (void *)(ssm->buff_virt + tz_req_len);
+
+	return rc;
+}
+
+/*
+ * Load SSM application in TZ and start application:
+ * 1. Check if SSM application is already loaded.
+ * 2. Load SSM application firmware.
+ * 3. Start SSM application in TZ.
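+ *
+ * The firmware is expected in Qualcomm's split-image layout: an
+ * "ssmapp.mdt" file carrying the ELF and program headers plus one
+ * "ssmapp.bNN" blob per loadable segment.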
+ */
+static int ssm_load_app(struct ssm_driver *ssm)
+{
+	unsigned char name[MAX_APP_NAME_SIZE], *pos;
+	int rc, i, fw_count;
+	uint32_t buff_len, size = 0, ion_len;
+	struct check_app_req app_req;
+	struct scm_resp app_resp;
+	struct load_app app_img_info;
+	const struct firmware **fw, *fw_mdt;
+	const struct elf32_hdr *ehdr;
+	const struct elf32_phdr *phdr;
+	struct ion_handle *ion_handle;
+	ion_phys_addr_t buff_phys;
+	ion_virt_addr_t buff_virt;
+
+	/* Check if TZ app already loaded */
+	app_req.cmd_id = APP_LOOKUP_COMMAND;
+	memcpy(app_req.app_name, TZAPP_NAME, MAX_APP_NAME_SIZE);
+
+	rc = scm_call(SCM_SVC_TZSCHEDULER, 1, &app_req,
+				sizeof(struct check_app_req),
+				&app_resp, sizeof(app_resp));
+	if (rc) {
+		dev_err(ssm->dev, "SCM call failed for LOOKUP COMMAND\n");
+		return -EINVAL;
+	}
+
+	if (app_resp.result == RESULT_FAILURE)
+		ssm->app_id = 0;
+	else
+		ssm->app_id = app_resp.data;
+
+	if (ssm->app_id) {
+		rc = 0;
+		dev_info(ssm->dev, "TZAPP already loaded...\n");
+		goto out;
+	}
+
+	/* App is not loaded; get the firmware */
+	/* Get .mdt first */
+	rc =  request_firmware(&fw_mdt, FIRMWARE_NAME".mdt", ssm->dev);
+	if (rc) {
+		dev_err(ssm->dev, "Unable to get mdt file %s\n",
+						FIRMWARE_NAME".mdt");
+		rc = -EIO;
+		goto out;
+	}
+
+	if (fw_mdt->size < sizeof(*ehdr)) {
+		dev_err(ssm->dev, "Not big enough to be an elf header\n");
+		rc = -EIO;
+		goto release_mdt;
+	}
+
+	ehdr = (struct elf32_hdr *)fw_mdt->data;
+	if (memcmp(ehdr->e_ident, ELFMAG, SELFMAG)) {
+		dev_err(ssm->dev, "Not an elf header\n");
+		rc = -EIO;
+		goto release_mdt;
+	}
+
+	if (ehdr->e_phnum == 0) {
+		dev_err(ssm->dev, "No loadable segments\n");
+		rc = -EIO;
+		goto release_mdt;
+	}
+
+	phdr = (const struct elf32_phdr *)(fw_mdt->data +
+					sizeof(struct elf32_hdr));
+
+	fw = kzalloc((sizeof(struct firmware *) * ehdr->e_phnum), GFP_KERNEL);
+	if (!fw) {
+		rc = -ENOMEM;
+		goto release_mdt;
+	}
+
+	/* Valid .mdt; now load the remaining segment blobs (.bNN) */
+	for (fw_count = 0; fw_count < ehdr->e_phnum ; fw_count++) {
+		snprintf(name, MAX_APP_NAME_SIZE, FIRMWARE_NAME".b%02d",
+								fw_count);
+		rc = request_firmware(&fw[fw_count], name, ssm->dev);
+		if (rc < 0) {
+			rc = -EIO;
+			dev_err(ssm->dev, "Unable to get blob file\n");
+			goto release_blob;
+		}
+
+		if (fw[fw_count]->size != phdr->p_filesz) {
+			dev_err(ssm->dev, "Blob size %u doesn't match %u\n",
+					fw[fw_count]->size, phdr->p_filesz);
+			rc = -EIO;
+			goto release_blob;
+		}
+
+		phdr++;
+		size += fw[fw_count]->size;
+	}
+
+	/* Ion allocation for loading tzapp */
+	/* ION buffer size 4k aligned */
+	ion_len = ALIGN_BUFFER(size);
+	ion_handle = ion_alloc(ssm_drv->ssm_ion_client,
+			ion_len, SZ_4K, ION_HEAP(ION_QSECOM_HEAP_ID), 0);
+	if (IS_ERR_OR_NULL(ion_handle)) {
+		rc = PTR_ERR(ion_handle);
+		dev_err(ssm->dev, "Unable to get ion handle\n");
+		goto release_blob;
+	}
+
+	rc = ion_phys(ssm_drv->ssm_ion_client, ion_handle,
+			&buff_phys, &buff_len);
+	if (rc < 0) {
+		dev_err(ssm->dev, "Unable to get ion physical address\n");
+		goto ion_free;
+	}
+
+	if (buff_len < size) {
+		rc = -ENOMEM;
+		goto ion_free;
+	}
+
+	buff_virt =
+		(ion_virt_addr_t)ion_map_kernel(ssm_drv->ssm_ion_client,
+				ion_handle);
+	if (IS_ERR_OR_NULL((void *)buff_virt)) {
+		rc = PTR_ERR((void *)buff_virt);
+		dev_err(ssm->dev, "Unable to get ion virtual address\n");
+		goto ion_free;
+	}
+
+	/* Copy firmware to ION memory */
+	memcpy((unsigned char *)buff_virt, fw_mdt->data, fw_mdt->size);
+	pos = (unsigned char *)buff_virt + fw_mdt->size;
+	for (i = 0; i < ehdr->e_phnum; i++) {
+		memcpy(pos, fw[i]->data, fw[i]->size);
+		pos += fw[i]->size;
+	}
+
+	/* Loading app */
+	app_img_info.cmd_id = APP_START_COMMAND;
+	app_img_info.mdt_len = fw_mdt->size;
+	app_img_info.img_len = size;
+	app_img_info.phy_addr = buff_phys;
+
+	/* SCM call to load the TZ APP */
+	rc = scm_call(SCM_SVC_TZSCHEDULER, 1, &app_img_info,
+		sizeof(struct load_app), &app_resp, sizeof(app_resp));
+	if (rc) {
+		rc = -EIO;
+		dev_err(ssm->dev, "SCM call to load APP failed\n");
+		goto ion_unmap;
+	}
+
+	if (app_resp.result == RESULT_FAILURE) {
+		rc = -EIO;
+		dev_err(ssm->dev, "SCM command to load TzAPP failed\n");
+		goto ion_unmap;
+	}
+
+	ssm->app_id = app_resp.data;
+	ssm->app_status = SUCCESS;
+
+ion_unmap:
+	ion_unmap_kernel(ssm_drv->ssm_ion_client, ion_handle);
+ion_free:
+	ion_free(ssm_drv->ssm_ion_client, ion_handle);
+release_blob:
+	while (--fw_count >= 0)
+		release_firmware(fw[fw_count]);
+	kfree(fw);
+release_mdt:
+	release_firmware(fw_mdt);
+out:
+	return rc;
+}
+
+/*
+ * Allocate buffer for transactions.
+ */
+static int ssm_setup_ion(struct ssm_driver *ssm)
+{
+	int rc = 0;
+	unsigned int size;
+
+	size = ALIGN_BUFFER(ATOM_MSG_LEN);
+
+	/* ION client for communicating with TZ */
+	ssm->ssm_ion_client = msm_ion_client_create(UINT_MAX,
+							"ssm-kernel");
+	if (IS_ERR_OR_NULL(ssm->ssm_ion_client)) {
+		rc = PTR_ERR(ssm->ssm_ion_client);
+		dev_err(ssm->dev, "Ion client not created\n");
+		return rc;
+	}
+
+	/* Setup a small ION buffer for tz communication */
+	ssm->ssm_ion_handle = ion_alloc(ssm->ssm_ion_client,
+				size, SZ_4K, ION_HEAP(ION_QSECOM_HEAP_ID), 0);
+	if (IS_ERR_OR_NULL(ssm->ssm_ion_handle)) {
+		rc = PTR_ERR(ssm->ssm_ion_handle);
+		dev_err(ssm->dev, "Unable to get ion handle\n");
+		goto out;
+	}
+
+	rc = ion_phys(ssm->ssm_ion_client, ssm->ssm_ion_handle,
+			&ssm->buff_phys, &ssm->buff_len);
+	if (rc < 0) {
+		dev_err(ssm->dev,
+			"Unable to get ion buffer physical address\n");
+		goto ion_free;
+	}
+
+	if (ssm->buff_len < size) {
+		rc = -ENOMEM;
+		goto ion_free;
+	}
+
+	ssm->buff_virt =
+		(ion_virt_addr_t)ion_map_kernel(ssm->ssm_ion_client,
+				ssm->ssm_ion_handle);
+	if (IS_ERR_OR_NULL((void *)ssm->buff_virt)) {
+		rc = PTR_ERR((void *)ssm->buff_virt);
+		dev_err(ssm->dev,
+			"Unable to get ion buffer virtual address\n");
+		goto ion_free;
+	}
+
+	return rc;
+
+ion_free:
+	ion_free(ssm->ssm_ion_client, ssm->ssm_ion_handle);
+out:
+	ion_client_destroy(ssm->ssm_ion_client);
+	return rc;
+}
+
+static struct ssm_platform_data *populate_ssm_pdata(struct device *dev)
+{
+	struct ssm_platform_data *pdata;
+	int rc;
+
+	pdata = devm_kzalloc(dev, sizeof(struct ssm_platform_data),
+								GFP_KERNEL);
+	if (!pdata)
+		return NULL;
+
+	pdata->need_key_exchg =
+		of_property_read_bool(dev->of_node, "qcom,need-keyexhg");
+
+	rc = of_property_read_string(dev->of_node, "qcom,channel-name",
+							&pdata->channel_name);
+	if (rc && rc != -EINVAL) {
+		dev_err(dev, "Error reading channel_name property %d\n", rc);
+		return NULL;
+	} else if (rc == -EINVAL)
+		pdata->channel_name = CHANNEL_NAME;
+
+	return pdata;
+}
+
+static int __devinit ssm_probe(struct platform_device *pdev)
+{
+	int rc;
+	uint32_t system_call_id;
+	char legacy = '\0';
+	struct ssm_platform_data *pdata;
+	struct ssm_driver *drv;
+
+	if (pdev->dev.of_node)
+		pdata = populate_ssm_pdata(&pdev->dev);
+	else
+		pdata = pdev->dev.platform_data;
+
+	if (!pdata) {
+		dev_err(&pdev->dev, "Empty platform data\n");
+		return -ENOMEM;
+	}
+
+	drv = devm_kzalloc(&pdev->dev, sizeof(struct ssm_driver),
+								GFP_KERNEL);
+	if (!drv) {
+		dev_err(&pdev->dev, "Unable to allocate memory\n");
+		return -ENOMEM;
+	}
+
+	/* Initialize the driver structure */
+	drv->atom_replay = -1;
+	drv->mtoa_replay = -1;
+	drv->app_id = -1;
+	drv->app_status = RETRY;
+	drv->ready = false;
+	drv->update_status = FAILED;
+	mutex_init(&drv->mutex);
+	drv->key_status = !pdata->need_key_exchg;
+	drv->channel_name = (char *)pdata->channel_name;
+	INIT_WORK(&drv->ipc_work, ssm_app_modem_work_fn);
+
+	/* Allocate memory for smd buffer */
+	drv->smd_buffer = devm_kzalloc(&pdev->dev,
+			(sizeof(char) * ATOM_MSG_LEN), GFP_KERNEL);
+	if (!drv->smd_buffer) {
+		rc = -ENOMEM;
+		goto exit;
+	}
+
+	/* Allocate response buffer */
+	drv->resp = devm_kzalloc(&pdev->dev,
+				sizeof(struct tzapp_get_mode_info_rsp),
+				GFP_KERNEL);
+	if (!drv->resp) {
+		rc = -ENOMEM;
+		goto exit;
+	}
+
+
+	/* Check for TZ version */
+	system_call_id = QSEOS_CHECK_VERSION_CMD;
+	rc = scm_call(SCM_SVC_INFO, SSM_INFO_CMD_ID, &system_call_id,
+			sizeof(system_call_id), &legacy, sizeof(legacy));
+	if (rc) {
+		dev_err(&pdev->dev, "Get version failed %d\n", rc);
+		rc = -EINVAL;
+		goto exit;
+	}
+
+	/* This driver only supports TZ 1.4 and QSEOS */
+	if (!legacy) {
+		dev_err(&pdev->dev,
+				"Driver doesn't support legacy version\n");
+		rc = -EINVAL;
+		goto exit;
+
+	}
+
+	/* Setup the ion buffer for transaction */
+	rc = ssm_setup_ion(drv);
+	if (rc < 0)
+		goto exit;
+
+	drv->dev = &pdev->dev;
+	ssm_drv = drv;
+	platform_set_drvdata(pdev, ssm_drv);
+
+	dev_dbg(&pdev->dev, "probe success\n");
+	return 0;
+
+exit:
+	mutex_destroy(&drv->mutex);
+	platform_set_drvdata(pdev, NULL);
+	return rc;
+
+}
+
+static int __devexit ssm_remove(struct platform_device *pdev)
+{
+	int rc;
+
+	struct scm_shutdown_req req;
+	struct scm_resp resp;
+
+	if (!ssm_drv)
+		return 0;
+	/*
+	 * Steps to exit:
+	 * 1. Set ready to false (OEM access closed).
+	 * 2. Close the SMD modem connection.
+	 * 3. Clean up ION.
+	 */
+	ssm_drv->ready = false;
+	smd_close(ssm_drv->ch);
+	flush_work_sync(&ssm_drv->ipc_work);
+
+	/* ION clean up*/
+	ion_unmap_kernel(ssm_drv->ssm_ion_client, ssm_drv->ssm_ion_handle);
+	ion_free(ssm_drv->ssm_ion_client, ssm_drv->ssm_ion_handle);
+	ion_client_destroy(ssm_drv->ssm_ion_client);
+
+	/* Shutdown tzapp */
+	req.app_id = ssm_drv->app_id;
+	req.cmd_id = APP_SHUTDOWN_COMMAND;
+	rc = scm_call(SCM_SVC_TZSCHEDULER, 1, &req, sizeof(req),
+			&resp, sizeof(resp));
+	if (rc)
+		dev_err(&pdev->dev, "TZ_app Unload failed\n");
+
+	return rc;
+}
+
+static struct of_device_id ssm_match_table[] = {
+	{
+		.compatible = "qcom,ssm",
+	},
+	{}
+};
+
+static struct platform_driver ssm_pdriver = {
+	.probe          = ssm_probe,
+	.remove         = __devexit_p(ssm_remove),
+	.driver = {
+		.name   = SSM_DEV_NAME,
+		.owner  = THIS_MODULE,
+		.of_match_table = ssm_match_table,
+	},
+};
+module_platform_driver(ssm_pdriver);
+
+/*
+ * Interface for external OEM driver.
+ * This interface supports following functionalities:
+ * 1. Get TZAPP ID.
+ * 2. Set default mode.
+ * 3. Set mode (the encrypted mode and its length are passed as parameters).
+ * 4. Set mode from TZ.
+ * 5. Get status of mode update.
+ *
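+ * A possible OEM call sequence (illustrative sketch only; enc_mode and
+ * enc_mode_len below are caller-provided, not defined here):
+ *
+ *	if (ssm_oem_driver_intf(SSM_READY, NULL, 0) == 0) {
+ *		ssm_oem_driver_intf(SSM_SET_MODE, enc_mode, enc_mode_len);
+ *		while (ssm_oem_driver_intf(SSM_GET_MODE_STATUS,
+ *					   NULL, 0) == RETRY)
+ *			msleep(100);
+ *	}
+ *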
+ */
+int ssm_oem_driver_intf(int cmd, char *mode, int len)
+{
+	int rc, req_len, resp_len;
+	struct tzapp_get_mode_info_req get_mode_req;
+	struct tzapp_get_mode_info_rsp *get_mode_resp;
+
+	/* If ssm_drv is NULL, probe failed */
+	if (!ssm_drv)
+		return -ENODEV;
+
+	mutex_lock(&ssm_drv->mutex);
+
+	if (ssm_drv->app_status == RETRY) {
+		/* Load TZAPP */
+		rc = ssm_load_app(ssm_drv);
+		if (rc) {
+			rc = -ENODEV;
+			ssm_drv->app_status = FAILED;
+			goto unlock;
+		}
+	} else if (ssm_drv->app_status == FAILED) {
+		rc = -ENODEV;
+		goto unlock;
+	}
+
+	/* Open modem SMD interface */
+	if (!ssm_drv->ready) {
+		rc = smd_open(ssm_drv->channel_name, &ssm_drv->ch, ssm_drv,
+							modem_request);
+		if (rc) {
+			rc = -EAGAIN;
+			goto unlock;
+		} else
+			ssm_drv->ready = true;
+	}
+
+	/* Try again; modem key exchange is not yet done. */
+	if (!ssm_drv->key_status) {
+		rc = -EAGAIN;
+		goto unlock;
+	}
+
+	/* Set return status to success */
+	rc = 0;
+
+	switch (cmd) {
+	case SSM_READY:
+		break;
+
+	case SSM_GET_APP_ID:
+		rc = ssm_drv->app_id;
+		break;
+
+	case SSM_MODE_INFO_READY:
+		ssm_drv->update_status = RETRY;
+		/* Fill command structure */
+		req_len = sizeof(struct tzapp_get_mode_info_req);
+		resp_len = sizeof(struct tzapp_get_mode_info_rsp);
+		get_mode_req.tzapp_ssm_cmd = GET_ENC_MODE;
+		rc = tz_scm_call(ssm_drv, (void *)&get_mode_req,
+				req_len, (void **)&get_mode_resp, resp_len);
+		if (rc) {
+			ssm_drv->update_status = FAILED;
+			break;
+		}
+
+		/* Send mode_info to modem */
+		rc = update_modem(SSM_ATOM_MODE_UPDATE, ssm_drv,
+				get_mode_resp->enc_mode_len,
+				get_mode_resp->enc_mode_info);
+		if (rc)
+			ssm_drv->update_status = FAILED;
+		break;
+
+	case SSM_SET_MODE:
+		ssm_drv->update_status = RETRY;
+
+		if (len > ENC_MODE_MAX_SIZE) {
+			ssm_drv->update_status = FAILED;
+			rc = -EINVAL;
+			break;
+		}
+		memcpy(ssm_drv->resp->enc_mode_info, mode, len);
+		ssm_drv->resp->enc_mode_len = len;
+
+		/* Send mode_info to modem */
+		rc = update_modem(SSM_ATOM_MODE_UPDATE, ssm_drv,
+				ssm_drv->resp->enc_mode_len,
+				ssm_drv->resp->enc_mode_info);
+		if (rc)
+			ssm_drv->update_status = FAILED;
+		break;
+
+	case SSM_GET_MODE_STATUS:
+		rc = ssm_drv->update_status;
+		break;
+
+	case SSM_SET_DEFAULT_MODE:
+		/* Modem does not send response for this */
+		ssm_drv->update_status = RETRY;
+		rc = update_modem(SSM_ATOM_SET_DEFAULT_MODE, ssm_drv,
+				1, "0");
+		if (rc)
+			ssm_drv->update_status = FAILED;
+		else
+			/* For default mode we don't get any resp
+			 * from modem.
+			 */
+			ssm_drv->update_status = SUCCESS;
+		break;
+	default:
+		rc = -EINVAL;
+		dev_err(ssm_drv->dev, "Invalid command\n");
+		break;
+	};
+
+unlock:
+	mutex_unlock(&ssm_drv->mutex);
+	return rc;
+}
+EXPORT_SYMBOL_GPL(ssm_oem_driver_intf);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Qualcomm Secure Service Module");
+
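For context, ssm_oem_driver_intf() above is the single entry point exported to the vendor OEM driver. The sketch below shows how such a caller might drive it; the command names, the -EAGAIN retry semantics and the SUCCESS/RETRY status values come from the switch statement above, while the wrapper function itself and its error mapping are illustrative only and not part of this patch.

	static int example_oem_set_mode(char *enc_mode, int len)
	{
		int rc;

		/* -EAGAIN: TZ app or modem key-exchange not ready yet, retry later */
		rc = ssm_oem_driver_intf(SSM_READY, NULL, 0);
		if (rc)
			return rc;

		rc = ssm_oem_driver_intf(SSM_SET_MODE, enc_mode, len);
		if (rc)
			return rc;

		/* Poll the modem's answer; RETRY means no response has arrived yet */
		rc = ssm_oem_driver_intf(SSM_GET_MODE_STATUS, NULL, 0);
		if (rc == SUCCESS)
			return 0;
		return (rc == RETRY) ? -EAGAIN : -EIO;
	}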
diff --git a/drivers/platform/msm/ssm.h b/drivers/platform/msm/ssm.h
new file mode 100644
index 0000000..97add11
--- /dev/null
+++ b/drivers/platform/msm/ssm.h
@@ -0,0 +1,160 @@
+/* Copyright (c) 2013, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __SSM_H_
+#define __SSM_H_
+
+#define MAX_APP_NAME_SIZE	32
+#define MODE_INFO_MAX_SIZE	4
+#define ENC_MODE_MAX_SIZE	(100 + MODE_INFO_MAX_SIZE)
+
+/* tzapp response.*/
+enum tz_response {
+	RESULT_SUCCESS = 0,
+	RESULT_FAILURE  = 0xFFFFFFFF,
+};
+
+/* tzapp command list.*/
+enum tz_commands {
+	ENC_MODE,
+	GET_ENC_MODE,
+	KEY_EXCHANGE = 11,
+};
+
+/* Command list for QSEOS.*/
+enum qceos_cmd_id {
+	APP_START_COMMAND = 0x01,
+	APP_SHUTDOWN_COMMAND,
+	APP_LOOKUP_COMMAND,
+	CLIENT_SEND_DATA_COMMAND = 0x6,
+	QSEOS_CMD_MAX = 0xEFFFFFFF,
+};
+
+/* MODEM/SSM command list.*/
+enum ssm_ipc_req {
+	SSM_MTOA_KEY_EXCHANGE = 0x0000AAAA,
+	SSM_ATOM_KEY_STATUS,
+	SSM_ATOM_MODE_UPDATE,
+	SSM_MTOA_MODE_UPDATE_STATUS,
+	SSM_MTOA_PREV_INVALID,
+	SSM_ATOM_PREV_INVALID,
+	SSM_ATOM_SET_DEFAULT_MODE,
+	SSM_INVALID_REQ,
+};
+
+/* OEM request command list.*/
+enum oem_req {
+	SSM_READY,
+	SSM_GET_APP_ID,
+	SSM_MODE_INFO_READY,
+	SSM_SET_MODE,
+	SSM_GET_MODE_STATUS,
+	SSM_SET_DEFAULT_MODE,
+	SSM_INVALID,
+};
+
+/* Modem mode update status.*/
+enum modem_mode_status {
+	SUCCESS,
+	RETRY,
+	FAILED = -1,
+};
+
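+/* Load tzapp request.*/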
+__packed struct load_app {
+	uint32_t cmd_id;
+	uint32_t mdt_len;
+	uint32_t img_len;
+	uint32_t phy_addr;
+	char     app_name[MAX_APP_NAME_SIZE];
+};
+
+/* Stop tzapp request.*/
+__packed struct scm_shutdown_req {
+	uint32_t cmd_id;
+	uint32_t app_id;
+};
+
+/* Common tzos response.*/
+__packed struct scm_resp {
+	uint32_t result;
+	enum tz_response resp_type;
+	unsigned int data;
+};
+
+/* tzos request.*/
+__packed struct check_app_req {
+	uint32_t cmd_id;
+	char     app_name[MAX_APP_NAME_SIZE];
+};
+
+/* tzapp encode mode request.*/
+__packed struct tzapp_mode_enc_req {
+	uint32_t tzapp_ssm_cmd;
+	uint8_t  mode_info[4];
+};
+
+/* tzapp encode mode response.*/
+__packed struct tzapp_mode_enc_rsp {
+	uint32_t tzapp_ssm_cmd;
+	uint8_t enc_mode_info[ENC_MODE_MAX_SIZE];
+	uint32_t enc_mode_len;
+	long status;
+};
+
+/* tzapp get mode request.*/
+__packed struct tzapp_get_mode_info_req {
+	uint32_t tzapp_ssm_cmd;
+};
+
+/* tzapp get mode response.*/
+__packed struct tzapp_get_mode_info_rsp {
+	uint32_t tzapp_ssm_cmd;
+	uint8_t  enc_mode_info[ENC_MODE_MAX_SIZE];
+	uint32_t enc_mode_len;
+	long status;
+};
+
+/* tzos key exchange request.*/
+__packed struct ssm_keyexchg_req {
+	uint32_t ssid;
+	void *address;
+	uint32_t length;
+	uint32_t *status;
+};
+
+/* tzos common request.*/
+__packed struct common_req {
+	uint32_t cmd_id;
+	uint32_t app_id;
+	void *req_ptr;
+	uint32_t req_len;
+	void *resp_ptr;
+	uint32_t resp_len;
+};
+
+/* tzos common response.*/
+__packed struct common_resp {
+	uint32_t result;
+	uint32_t type;
+	uint32_t data;
+};
+
+/* Modem/SSM packet format.*/
+struct ssm_common_msg {
+	unsigned long pktlen;
+	unsigned long replaynum;
+	enum ssm_ipc_req ipc_req;
+	unsigned long msg_len;
+	char *msg;
+};
+
+#endif
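The structures above describe formats shared with the secure world and the modem, so their sizes and field order are load-bearing. A compile-time sanity check such as the following could be dropped into ssm_probe(); it is purely illustrative and not part of this patch, and only the struct names and macros it references come from this header.

	static void ssm_layout_sanity_check(void)
	{
		/* The two mode responses are expected to share one layout */
		BUILD_BUG_ON(sizeof(struct tzapp_mode_enc_rsp) !=
				sizeof(struct tzapp_get_mode_info_rsp));
		/* enc_mode_info must be able to wrap a raw mode blob */
		BUILD_BUG_ON(ENC_MODE_MAX_SIZE <= MODE_INFO_MAX_SIZE);
	}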
diff --git a/drivers/power/pm8921-bms.c b/drivers/power/pm8921-bms.c
index 13e23e8..c5b1db4 100644
--- a/drivers/power/pm8921-bms.c
+++ b/drivers/power/pm8921-bms.c
@@ -128,7 +128,6 @@
 	int			catch_up_time_us;
 	enum battery_type	batt_type;
 	uint16_t		ocv_reading_at_100;
-	int			cc_reading_at_100;
 	int			max_voltage_uv;
 
 	int			chg_term_ua;
@@ -1042,10 +1041,8 @@
 	}
 
 	/* stop faking 100% after an OCV event */
-	if (chip->ocv_reading_at_100 != raw->last_good_ocv_raw) {
+	if (chip->ocv_reading_at_100 != raw->last_good_ocv_raw)
 		chip->ocv_reading_at_100 = OCV_RAW_UNINITIALIZED;
-		chip->cc_reading_at_100 = 0;
-	}
 	pr_debug("0p625 = %duV\n", chip->xoadc_v0625);
 	pr_debug("1p25 = %duV\n", chip->xoadc_v125);
 	pr_debug("last_good_ocv_raw= 0x%x, last_good_ocv_uv= %duV\n",
@@ -1185,10 +1182,7 @@
 	int64_t cc_voltage_uv, cc_pvh, cc_uah;
 
 	cc_voltage_uv = cc;
-	cc_voltage_uv -= chip->cc_reading_at_100;
-	pr_debug("cc = %d. after subtracting 0x%x cc = %lld\n",
-					cc, chip->cc_reading_at_100,
-					cc_voltage_uv);
+	pr_debug("cc = %d\n", cc);
 	cc_voltage_uv = cc_to_microvolt(chip, cc_voltage_uv);
 	cc_voltage_uv = pm8xxx_cc_adjust_for_gain(cc_voltage_uv);
 	pr_debug("cc_voltage_uv = %lld microvolts\n", cc_voltage_uv);
@@ -1513,10 +1507,7 @@
 
 	/* calculate cc micro_volt_hour */
 	calculate_cc_uah(chip, raw->cc, cc_uah);
-	pr_debug("cc_uah = %duAh raw->cc = %x cc = %lld after subtracting %x\n",
-				*cc_uah, raw->cc,
-				(int64_t)raw->cc - chip->cc_reading_at_100,
-				chip->cc_reading_at_100);
+	pr_debug("cc_uah = %duAh raw->cc = %x\n", *cc_uah, raw->cc);
 
 	soc_rbatt = ((*remaining_charge_uah - *cc_uah) * 100) / *fcc_uah;
 	if (soc_rbatt < 0)
@@ -2653,19 +2644,20 @@
 
 	if (is_battery_full) {
 		the_chip->ocv_reading_at_100 = raw.last_good_ocv_raw;
-		the_chip->cc_reading_at_100 = raw.cc;
 
 		the_chip->last_ocv_uv = the_chip->max_voltage_uv;
 		raw.last_good_ocv_uv = the_chip->max_voltage_uv;
+		raw.cc = 0;
+		/* reset the cc in h/w */
+		reset_cc(the_chip);
 		the_chip->last_ocv_temp_decidegc = batt_temp;
 		/*
 		 * since we are treating this as an ocv event
 		 * forget the old cc value
 		 */
 		the_chip->last_cc_uah = 0;
-		pr_debug("EOC BATT_FULL ocv_reading = 0x%x cc = 0x%x\n",
-				the_chip->ocv_reading_at_100,
-				the_chip->cc_reading_at_100);
+		pr_debug("EOC BATT_FULL ocv_reading = 0x%x\n",
+				the_chip->ocv_reading_at_100);
 	}
 
 	the_chip->end_percent = calculate_state_of_charge(the_chip, &raw,
diff --git a/drivers/power/pm8921-charger.c b/drivers/power/pm8921-charger.c
index f87a443..03b3e0d 100644
--- a/drivers/power/pm8921-charger.c
+++ b/drivers/power/pm8921-charger.c
@@ -255,12 +255,9 @@
 	struct dentry			*dent;
 	struct bms_notify		bms_notify;
 	int				*usb_trim_table;
-	struct regulator		*vreg_xoadc;
 	bool				ext_charging;
 	bool				ext_charge_done;
 	bool				iusb_fine_res;
-	bool				final_kickstart;
-	bool				lockup_lpm_wrkarnd;
 	DECLARE_BITMAP(enabled_irqs, PM_CHG_MAX_INTS);
 	struct work_struct		battery_id_valid_work;
 	int64_t				batt_id_min;
@@ -296,6 +293,7 @@
 	int				stop_chg_upon_expiry;
 	bool				disable_aicl;
 	int				usb_type;
+	bool				disable_chg_rmvl_wrkarnd;
 };
 
 /* user space parameter to limit usb current */
@@ -311,7 +309,6 @@
 
 static struct pm8921_chg_chip *the_chip;
 
-static DEFINE_SPINLOCK(lpm_lock);
 #define LPM_ENABLE_BIT	BIT(2)
 static int pm8921_chg_set_lpm(struct pm8921_chg_chip *chip, int enable)
 {
@@ -340,66 +337,11 @@
 static int pm_chg_write(struct pm8921_chg_chip *chip, u16 addr, u8 reg)
 {
 	int rc;
-	unsigned long flags = 0;
-	u8 temp;
 
-	/* Disable LPM */
-	if (chip->lockup_lpm_wrkarnd) {
-		spin_lock_irqsave(&lpm_lock, flags);
+	rc = pm8xxx_writeb(chip->dev->parent, addr, reg);
+	if (rc)
+		pr_err("failed: addr=%03X, rc=%d\n", addr, rc);
 
-		/*
-		 * This delay is to prevent exit out of 32khz mode within
-		 * 200uS. It could be that chg was removed just few uS before
-		 * this gets called.
-		 */
-		udelay(200);
-		/* no clks */
-		temp = 0xD1;
-		rc = pm8xxx_writeb(chip->dev->parent, CHG_TEST, temp);
-		if (rc) {
-			pr_err("Error %d writing %d to CHG_TEST\n", rc, temp);
-			goto release_lpm_lock;
-		}
-
-		/* force 19.2Mhz before reading */
-		temp = 0xD3;
-		rc = pm8xxx_writeb(chip->dev->parent, CHG_TEST, temp);
-		if (rc) {
-			pr_err("Error %d writing %d to CHG_TEST\n", rc, temp);
-			goto release_lpm_lock;
-		}
-
-		rc = pm8xxx_writeb(chip->dev->parent, addr, reg);
-		if (rc) {
-			pr_err("failed: addr=%03X, rc=%d\n", addr, rc);
-			goto release_lpm_lock;
-		}
-
-		/* no clks */
-		temp = 0xD1;
-		rc = pm8xxx_writeb(chip->dev->parent, CHG_TEST, temp);
-		if (rc) {
-			pr_err("Error %d writing %d to CHG_TEST\n", rc, temp);
-			goto release_lpm_lock;
-		}
-
-		/* switch to hw clk selection */
-		temp = 0xD0;
-		rc = pm8xxx_writeb(chip->dev->parent, CHG_TEST, temp);
-		if (rc) {
-			pr_err("Error %d writing %d to CHG_TEST\n", rc, temp);
-			goto release_lpm_lock;
-		}
-
-		udelay(200);
-
-release_lpm_lock:
-		spin_unlock_irqrestore(&lpm_lock, flags);
-	} else {
-		rc = pm8xxx_writeb(chip->dev->parent, addr, reg);
-		if (rc)
-			pr_err("failed: addr=%03X, rc=%d\n", addr, rc);
-	}
 	return rc;
 }
 
@@ -430,23 +372,6 @@
 					chip->pmic_chg_irq[irq_id]);
 }
 
-static int is_chg_on_bat(struct pm8921_chg_chip *chip)
-{
-	return !(pm_chg_get_rt_status(chip, DCIN_VALID_IRQ)
-			|| pm_chg_get_rt_status(chip, USBIN_VALID_IRQ));
-}
-
-static void pm8921_chg_bypass_bat_gone_debounce(struct pm8921_chg_chip *chip,
-		int bypass)
-{
-	int rc;
-
-	rc = pm_chg_write(chip, COMPARATOR_OVERRIDE, bypass ? 0x89 : 0x88);
-	if (rc) {
-		pr_err("Failed to set bypass bit to %d rc=%d\n", bypass, rc);
-	}
-}
-
 /* Treat OverVoltage/UnderVoltage as source missing */
 static int is_usb_chg_plugged_in(struct pm8921_chg_chip *chip)
 {
@@ -469,35 +394,8 @@
 static int pm_chg_get_fsm_state(struct pm8921_chg_chip *chip)
 {
 	u8 temp;
-	unsigned long flags = 0;
 	int err = 0, ret = 0;
 
-	if (chip->lockup_lpm_wrkarnd) {
-		spin_lock_irqsave(&lpm_lock, flags);
-
-		/*
-		 * This delay is to prevent exit out of 32khz mode within
-		 * 200uS. It could be that chg was removed just few uS before
-		 * this gets called.
-		 */
-		udelay(200);
-		/* no clks */
-		temp = 0xD1;
-		err = pm8xxx_writeb(chip->dev->parent, CHG_TEST, temp);
-		if (err) {
-			pr_err("Error %d writing %d to CHG_TEST\n", err, temp);
-			goto err_out;
-		}
-
-		/* force 19.2Mhz before reading */
-		temp = 0xD3;
-		err = pm8xxx_writeb(chip->dev->parent, CHG_TEST, temp);
-		if (err) {
-			pr_err("Error %d writing %d to CHG_TEST\n", err, temp);
-			goto err_out;
-		}
-	}
-
 	temp = CAPTURE_FSM_STATE_CMD;
 	err = pm8xxx_writeb(chip->dev->parent, CHG_TEST, temp);
 	if (err) {
@@ -535,29 +433,7 @@
 	/* get the upper 1 bit */
 	ret |= (temp & 0x1) << 4;
 
-	if (chip->lockup_lpm_wrkarnd) {
-		/* no clks */
-		temp = 0xD1;
-		err = pm8xxx_writeb(chip->dev->parent, CHG_TEST, temp);
-		if (err) {
-			pr_err("Error %d writing %d to CHG_TEST\n", err, temp);
-			goto err_out;
-		}
-
-		/* switch to hw clk selection */
-		temp = 0xD0;
-		err = pm8xxx_writeb(chip->dev->parent, CHG_TEST, temp);
-		if (err) {
-			pr_err("Error %d writing %d to CHG_TEST\n", err, temp);
-			goto err_out;
-		}
-
-		udelay(200);
-	}
-
 err_out:
-	if (chip->lockup_lpm_wrkarnd)
-		spin_unlock_irqrestore(&lpm_lock, flags);
 	if (err)
 		return err;
 
@@ -568,35 +444,8 @@
 static int pm_chg_get_regulation_loop(struct pm8921_chg_chip *chip)
 {
 	u8 temp, data;
-	unsigned long flags = 0;
 	int err = 0;
 
-	if (chip->lockup_lpm_wrkarnd) {
-		spin_lock_irqsave(&lpm_lock, flags);
-
-		/*
-		 * This delay is to prevent exit out of 32khz mode within
-		 * 200uS. It could be that chg was removed just few uS before
-		 * this gets called.
-		 */
-		udelay(200);
-		/* no clks */
-		temp = 0xD1;
-		err = pm8xxx_writeb(chip->dev->parent, CHG_TEST, temp);
-		if (err) {
-			pr_err("Error %d writing %d to CHG_TEST\n", err, temp);
-			goto err_out;
-		}
-
-		/* force 19.2Mhz before reading */
-		temp = 0xD3;
-		err = pm8xxx_writeb(chip->dev->parent, CHG_TEST, temp);
-		if (err) {
-			pr_err("Error %d writing %d to CHG_TEST\n", err, temp);
-			goto err_out;
-		}
-	}
-
 	temp = READ_BANK_6;
 	err = pm8xxx_writeb(chip->dev->parent, CHG_TEST, temp);
 	if (err) {
@@ -610,29 +459,7 @@
 		goto err_out;
 	}
 
-	if (chip->lockup_lpm_wrkarnd) {
-		/* no clks */
-		temp = 0xD1;
-		err = pm8xxx_writeb(chip->dev->parent, CHG_TEST, temp);
-		if (err) {
-			pr_err("Error %d writing %d to CHG_TEST\n", err, temp);
-			goto err_out;
-		}
-
-		/* switch to hw clk selection */
-		temp = 0xD0;
-		err = pm8xxx_writeb(chip->dev->parent, CHG_TEST, temp);
-		if (err) {
-			pr_err("Error %d writing %d to CHG_TEST\n", err, temp);
-			goto err_out;
-		}
-
-		udelay(200);
-	}
-
 err_out:
-	if (chip->lockup_lpm_wrkarnd)
-		spin_unlock_irqrestore(&lpm_lock, flags);
 	if (err)
 		return err;
 
@@ -2099,10 +1926,10 @@
 	 * This would also apply when the battery has been
 	 * removed from the running system.
 	 */
-	if (the_chip && !get_prop_batt_present(the_chip)
+	if (mA == 0 && the_chip && !get_prop_batt_present(the_chip)
 		&& !is_dc_chg_plugged_in(the_chip)) {
 		if (!the_chip->has_dc_supply) {
-			pr_err("rejected: no other power source connected\n");
+			pr_err("rejected: no other power source mA = %d\n", mA);
 			return;
 		}
 	}
@@ -2377,96 +2204,9 @@
 	return get_prop_batt_temp(the_chip);
 }
 
-static int __pm8921_apply_19p2mhz_kickstart(struct pm8921_chg_chip *chip)
-{
-	int err;
-	u8 temp;
-
-
-	temp  = 0xD1;
-	err = pm8xxx_writeb(chip->dev->parent, CHG_TEST, temp);
-	if (err) {
-		pr_err("Error %d writing %d to addr %d\n", err, temp, CHG_TEST);
-		return err;
-	}
-
-	temp  = 0xD3;
-	err = pm8xxx_writeb(chip->dev->parent, CHG_TEST, temp);
-	if (err) {
-		pr_err("Error %d writing %d to addr %d\n", err, temp, CHG_TEST);
-		return err;
-	}
-
-	temp  = 0xD1;
-	err = pm8xxx_writeb(chip->dev->parent, CHG_TEST, temp);
-	if (err) {
-		pr_err("Error %d writing %d to addr %d\n", err, temp, CHG_TEST);
-		return err;
-	}
-
-	temp  = 0xD5;
-	err = pm8xxx_writeb(chip->dev->parent, CHG_TEST, temp);
-	if (err) {
-		pr_err("Error %d writing %d to addr %d\n", err, temp, CHG_TEST);
-		return err;
-	}
-
-	/* Wait a few clock cycles before re-enabling hw clock switching */
-	udelay(183);
-
-	temp  = 0xD1;
-	err = pm8xxx_writeb(chip->dev->parent, CHG_TEST, temp);
-	if (err) {
-		pr_err("Error %d writing %d to addr %d\n", err, temp, CHG_TEST);
-		return err;
-	}
-
-	temp  = 0xD0;
-	err = pm8xxx_writeb(chip->dev->parent, CHG_TEST, temp);
-	if (err) {
-		pr_err("Error %d writing %d to addr %d\n", err, temp, CHG_TEST);
-		return err;
-	}
-
-	/* Wait for few clock cycles before re-enabling LPM */
-	udelay(32);
-
-	return 0;
-}
-
-static int pm8921_apply_19p2mhz_kickstart(struct pm8921_chg_chip *chip)
-{
-	int err;
-	unsigned long flags = 0;
-
-	spin_lock_irqsave(&lpm_lock, flags);
-	err = pm8921_chg_set_lpm(chip, 0);
-	if (err) {
-		pr_err("Error settig LPM rc=%d\n", err);
-		goto kick_err;
-	}
-
-	__pm8921_apply_19p2mhz_kickstart(chip);
-
-kick_err:
-	err = pm8921_chg_set_lpm(chip, 1);
-	if (err)
-		pr_err("Error settig LPM rc=%d\n", err);
-
-	spin_unlock_irqrestore(&lpm_lock, flags);
-
-	return err;
-}
-
 static void handle_usb_insertion_removal(struct pm8921_chg_chip *chip)
 {
-	int usb_present, rc = 0;
-
-	if (chip->lockup_lpm_wrkarnd) {
-		rc = pm8921_apply_19p2mhz_kickstart(chip);
-		if (rc)
-			pr_err("Failed to apply kickstart rc=%d\n", rc);
-	}
+	int usb_present;
 
 	pm_chg_failed_clear(chip, 1);
 	usb_present = is_usb_chg_plugged_in(chip);
@@ -2476,11 +2216,6 @@
 		power_supply_changed(&chip->usb_psy);
 		power_supply_changed(&chip->batt_psy);
 		pm8921_bms_calibrate_hkadc();
-
-		/* Enable/disable bypass if charger is on battery */
-		if (chip->lockup_lpm_wrkarnd)
-			pm8921_chg_bypass_bat_gone_debounce(chip,
-				is_chg_on_bat(chip));
 	}
 	if (usb_present) {
 		schedule_delayed_work(&chip->unplug_check_work,
@@ -2496,10 +2231,6 @@
 
 static void handle_stop_ext_chg(struct pm8921_chg_chip *chip)
 {
-	if (chip->lockup_lpm_wrkarnd)
-		/* Enable bypass if charger is on battery */
-		pm8921_chg_bypass_bat_gone_debounce(chip, is_chg_on_bat(chip));
-
 	if (!chip->ext_psy) {
 		pr_debug("external charger not registered.\n");
 		return;
@@ -2529,10 +2260,6 @@
 	unsigned long delay =
 		round_jiffies_relative(msecs_to_jiffies(EOC_CHECK_PERIOD_MS));
 
-	/* Disable bypass if charger connected and not running on bat */
-	if (chip->lockup_lpm_wrkarnd)
-		pm8921_chg_bypass_bat_gone_debounce(chip, is_chg_on_bat(chip));
-
 	if (!chip->ext_psy) {
 		pr_debug("external charger not registered.\n");
 		return;
@@ -3014,28 +2741,12 @@
 				pm_chg_get_fsm_state(chip),
 				get_prop_batt_current(chip)
 				);
-			if (chip->lockup_lpm_wrkarnd) {
-				rc = pm8921_apply_19p2mhz_kickstart(chip);
-				if (rc)
-					pr_err("Failed kickstart rc=%d\n", rc);
-
-				/*
-				 * Make sure kickstart happens at least 200 ms
-				 * after charger has been removed.
-				 */
-				if (chip->final_kickstart) {
-					chip->final_kickstart = false;
-					goto check_again_later;
-				}
-			}
 			return;
 		} else {
 			goto check_again_later;
 		}
 	}
 
-	chip->final_kickstart = true;
-
 	/* AICL only for usb wall charger */
 	if ((active_path & USB_ACTIVE_BIT) && usb_target_ma > 0 &&
 		!chip->disable_aicl) {
@@ -3057,7 +2768,7 @@
 	pr_debug("reg_loop=0x%x usb_ma = %d\n", reg_loop, usb_ma);
 
 	ibat = get_prop_batt_current(chip);
-	if (reg_loop & VIN_ACTIVE_BIT) {
+	if ((reg_loop & VIN_ACTIVE_BIT) && !chip->disable_chg_rmvl_wrkarnd) {
 		if (ibat > 0) {
 			pr_debug("revboost ibat = %d fsm = %d loop = 0x%x\n",
 				ibat, pm_chg_get_fsm_state(chip), reg_loop);
@@ -3077,7 +2788,8 @@
 			active_path, active_chg_plugged_in);
 	chg_gone = pm_chg_get_rt_status(chip, CHG_GONE_IRQ);
 
-	if (chg_gone == 1  && active_chg_plugged_in == 1) {
+	if (chg_gone == 1  && active_chg_plugged_in == 1 &&
+					!chip->disable_chg_rmvl_wrkarnd) {
 		pr_debug("chg_gone=%d, active_chg_plugged_in = %d\n",
 					chg_gone, active_chg_plugged_in);
 		unplug_ovp_fet_open(chip);
@@ -3328,11 +3040,6 @@
 		else
 			handle_stop_ext_chg(chip);
 	} else {
-		if (chip->lockup_lpm_wrkarnd)
-			/* if no external supply call bypass debounce here */
-			pm8921_chg_bypass_bat_gone_debounce(chip,
-				is_chg_on_bat(chip));
-
 		if (dc_present)
 			schedule_delayed_work(&chip->unplug_check_work,
 				msecs_to_jiffies(UNPLUG_CHECK_WAIT_PERIOD_MS));
@@ -4164,6 +3871,91 @@
 	return -EINVAL;
 }
 
+static void pm8921_chg_force_19p2mhz_clk(struct pm8921_chg_chip *chip)
+{
+	int err;
+	u8 temp;
+
+	temp  = 0xD1;
+	err = pm_chg_write(chip, CHG_TEST, temp);
+	if (err) {
+		pr_err("Error %d writing %d to addr %d\n", err, temp, CHG_TEST);
+		return;
+	}
+
+	temp  = 0xD3;
+	err = pm_chg_write(chip, CHG_TEST, temp);
+	if (err) {
+		pr_err("Error %d writing %d to addr %d\n", err, temp, CHG_TEST);
+		return;
+	}
+
+	temp  = 0xD1;
+	err = pm_chg_write(chip, CHG_TEST, temp);
+	if (err) {
+		pr_err("Error %d writing %d to addr %d\n", err, temp, CHG_TEST);
+		return;
+	}
+
+	temp  = 0xD5;
+	err = pm_chg_write(chip, CHG_TEST, temp);
+	if (err) {
+		pr_err("Error %d writing %d to addr %d\n", err, temp, CHG_TEST);
+		return;
+	}
+
+	udelay(183);
+
+	temp  = 0xD1;
+	err = pm_chg_write(chip, CHG_TEST, temp);
+	if (err) {
+		pr_err("Error %d writing %d to addr %d\n", err, temp, CHG_TEST);
+		return;
+	}
+
+	temp  = 0xD0;
+	err = pm_chg_write(chip, CHG_TEST, temp);
+	if (err) {
+		pr_err("Error %d writing %d to addr %d\n", err, temp, CHG_TEST);
+		return;
+	}
+	udelay(32);
+
+	temp  = 0xD1;
+	err = pm_chg_write(chip, CHG_TEST, temp);
+	if (err) {
+		pr_err("Error %d writing %d to addr %d\n", err, temp, CHG_TEST);
+		return;
+	}
+
+	temp  = 0xD3;
+	err = pm_chg_write(chip, CHG_TEST, temp);
+	if (err) {
+		pr_err("Error %d writing %d to addr %d\n", err, temp, CHG_TEST);
+		return;
+	}
+}
+
+static void pm8921_chg_set_hw_clk_switching(struct pm8921_chg_chip *chip)
+{
+	int err;
+	u8 temp;
+
+	temp  = 0xD1;
+	err = pm_chg_write(chip, CHG_TEST, temp);
+	if (err) {
+		pr_err("Error %d writing %d to addr %d\n", err, temp, CHG_TEST);
+		return;
+	}
+
+	temp  = 0xD0;
+	err = pm_chg_write(chip, CHG_TEST, temp);
+	if (err) {
+		pr_err("Error %d writing %d to addr %d\n", err, temp, CHG_TEST);
+		return;
+	}
+}
+
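pm8921_chg_force_19p2mhz_clk() and pm8921_chg_set_hw_clk_switching() above are straight-line sequences of CHG_TEST writes. As an aside (not part of the patch), the same pattern could be made table-driven with a small helper built on the existing pm_chg_write(); the udelay() points would still be interleaved by the caller between sequences:

	static int pm_chg_write_seq(struct pm8921_chg_chip *chip,
				const u8 *vals, int count)
	{
		int i, err;

		for (i = 0; i < count; i++) {
			err = pm_chg_write(chip, CHG_TEST, vals[i]);
			if (err) {
				pr_err("Error %d writing %d to addr %d\n",
						err, vals[i], CHG_TEST);
				return err;
			}
		}
		return 0;
	}

	/* e.g. the first half of the force-19.2MHz sequence */
	static const u8 force_19p2_prologue[] = { 0xD1, 0xD3, 0xD1, 0xD5 };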
 #define VREF_BATT_THERM_FORCE_ON	BIT(7)
 static void detect_battery_removal(struct pm8921_chg_chip *chip)
 {
@@ -4195,15 +3987,8 @@
 	u8 subrev;
 	int rc, vdd_safe, fcc_uah, safety_time = DEFAULT_SAFETY_MINUTES;
 
-	spin_lock_init(&lpm_lock);
-
-	if (pm8xxx_get_version(chip->dev->parent) == PM8XXX_VERSION_8921) {
-		rc = __pm8921_apply_19p2mhz_kickstart(chip);
-		if (rc) {
-			pr_err("Failed to apply kickstart rc=%d\n", rc);
-			return rc;
-		}
-	}
+	/* Force the 19.2 MHz clock before accessing any charger registers */
+	pm8921_chg_force_19p2mhz_clk(chip);
 
 	detect_battery_removal(chip);
 
@@ -4451,45 +4236,6 @@
 		return rc;
 	}
 
-	if (pm8xxx_get_version(chip->dev->parent) == PM8XXX_VERSION_8921) {
-		/* Clear kickstart */
-		rc = pm8xxx_writeb(chip->dev->parent, CHG_TEST, 0xD0);
-		if (rc) {
-			pr_err("Failed to clear kickstart rc=%d\n", rc);
-			return rc;
-		}
-
-		/* From here the lpm_workaround will be active */
-		chip->lockup_lpm_wrkarnd = true;
-
-		/* Enable LPM */
-		pm8921_chg_set_lpm(chip, 1);
-	}
-
-	if (chip->lockup_lpm_wrkarnd) {
-		chip->vreg_xoadc = regulator_get(chip->dev, "vreg_xoadc");
-		if (IS_ERR(chip->vreg_xoadc))
-			return -ENODEV;
-
-		rc = regulator_set_optimum_mode(chip->vreg_xoadc, 10000);
-		if (rc < 0) {
-			pr_err("Failed to set configure HPM rc=%d\n", rc);
-			return rc;
-		}
-
-		rc = regulator_set_voltage(chip->vreg_xoadc, 1800000, 1800000);
-		if (rc) {
-			pr_err("Failed to set L14 voltage rc=%d\n", rc);
-			return rc;
-		}
-
-		rc = regulator_enable(chip->vreg_xoadc);
-		if (rc) {
-			pr_err("Failed to enable L14 rc=%d\n", rc);
-			return rc;
-		}
-	}
-
 	return 0;
 }
 
@@ -4740,19 +4486,16 @@
 	int rc;
 	struct pm8921_chg_chip *chip = dev_get_drvdata(dev);
 
-	if (chip->lockup_lpm_wrkarnd) {
-		rc = regulator_disable(chip->vreg_xoadc);
-		if (rc)
-			pr_err("Failed to disable L14 rc=%d\n", rc);
-
-		rc = pm8921_apply_19p2mhz_kickstart(chip);
-		if (rc)
-			pr_err("Failed to apply kickstart rc=%d\n", rc);
-	}
-
 	rc = pm_chg_masked_write(chip, CHG_CNTRL, VREF_BATT_THERM_FORCE_ON, 0);
 	if (rc)
 		pr_err("Failed to Force Vref therm off rc=%d\n", rc);
+
+	rc = pm8921_chg_set_lpm(chip, 1);
+	if (rc)
+		pr_err("Failed to set lpm rc=%d\n", rc);
+
+	pm8921_chg_set_hw_clk_switching(chip);
+
 	return 0;
 }
 
@@ -4761,15 +4504,11 @@
 	int rc;
 	struct pm8921_chg_chip *chip = dev_get_drvdata(dev);
 
-	if (chip->lockup_lpm_wrkarnd) {
-		rc = regulator_enable(chip->vreg_xoadc);
-		if (rc)
-			pr_err("Failed to enable L14 rc=%d\n", rc);
+	pm8921_chg_force_19p2mhz_clk(chip);
 
-		rc = pm8921_apply_19p2mhz_kickstart(chip);
-		if (rc)
-			pr_err("Failed to apply kickstart rc=%d\n", rc);
-	}
+	rc = pm8921_chg_set_lpm(chip, 0);
+	if (rc)
+		pr_err("Failed to set lpm rc=%d\n", rc);
 
 	rc = pm_chg_masked_write(chip, CHG_CNTRL, VREF_BATT_THERM_FORCE_ON,
 						VREF_BATT_THERM_FORCE_ON);
@@ -4869,6 +4608,7 @@
 	chip->vin_min = pdata->vin_min;
 	chip->thermal_mitigation = pdata->thermal_mitigation;
 	chip->thermal_levels = pdata->thermal_levels;
+	chip->disable_chg_rmvl_wrkarnd = pdata->disable_chg_rmvl_wrkarnd;
 
 	chip->cold_thr = pdata->cold_thr;
 	chip->hot_thr = pdata->hot_thr;
@@ -5000,7 +4740,6 @@
 {
 	struct pm8921_chg_chip *chip = platform_get_drvdata(pdev);
 
-	regulator_put(chip->vreg_xoadc);
 	free_irqs(chip);
 	platform_set_drvdata(pdev, NULL);
 	the_chip = NULL;
diff --git a/drivers/power/qpnp-bms.c b/drivers/power/qpnp-bms.c
index 85a310a..ec0b0e7 100644
--- a/drivers/power/qpnp-bms.c
+++ b/drivers/power/qpnp-bms.c
@@ -670,10 +670,18 @@
 #define SLEEP_CLK_HZ		32764
 #define SECONDS_PER_HOUR	3600
 
-static s64 cc_uv_to_uvh(s64 cc_uv)
+static s64 cc_uv_to_pvh(s64 cc_uv)
 {
-	return div_s64(cc_uv * CC_READING_TICKS,
-			SLEEP_CLK_HZ * SECONDS_PER_HOUR);
+	/*
+	 * A multiplication by 1000000 is needed here to convert from uVh
+	 * to pVh. However, the maximum Coulomb Counter value is 2^35,
+	 * which can cause an overflow.
+	 * Multiply by 100000 first to preserve as much precision as
+	 * possible, then multiply by 10 after doing the division in
+	 * order to avoid overflow on the maximum Coulomb Counter value.
+	 */
+	return div_s64(cc_uv * CC_READING_TICKS * 100000,
+			SLEEP_CLK_HZ * SECONDS_PER_HOUR) * 10;
 }
 
 /**
@@ -688,7 +696,7 @@
  */
 static int calculate_cc(struct qpnp_bms_chip *chip, int64_t cc)
 {
-	int64_t cc_voltage_uv, cc_uvh, cc_uah;
+	int64_t cc_voltage_uv, cc_pvh, cc_uah;
 	struct qpnp_iadc_calib calibration;
 
 	qpnp_iadc_get_gain_and_offset(&calibration);
@@ -702,9 +710,9 @@
 					calibration.gain_raw
 					- calibration.offset_raw);
 	pr_debug("cc_voltage_uv = %lld uv\n", cc_voltage_uv);
-	cc_uvh = cc_uv_to_uvh(cc_voltage_uv);
-	pr_debug("cc_uvh = %lld micro_volt_hour\n", cc_uvh);
-	cc_uah = div_s64(cc_uvh * 1000000LL, chip->r_sense_uohm);
+	cc_pvh = cc_uv_to_pvh(cc_voltage_uv);
+	pr_debug("cc_pvh = %lld pvh\n", cc_pvh);
+	cc_uah = div_s64(cc_pvh, chip->r_sense_uohm);
 	/* cc_raw had 4 bits of extra precision.
 	   By now it should be within 32 bit range */
 	return (int)cc_uah;
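Taken together, cc_uv_to_pvh() and the final division above implement uV -> pVh -> uAh; pVh divided by uOhm yields uAh, so no further scale factor is needed. The chain can be restated in one place as a sketch (illustrative only; it reuses the constants and div_s64() already used in this file and takes the sense resistance as a parameter):

	static int example_cc_uv_to_uah(s64 cc_uv, int r_sense_uohm)
	{
		s64 cc_pvh;

		/* Split the 1e6 factor as 1e5 * 10 to keep headroom in s64 */
		cc_pvh = div_s64(cc_uv * CC_READING_TICKS * 100000,
				SLEEP_CLK_HZ * SECONDS_PER_HOUR) * 10;

		return (int)div_s64(cc_pvh, r_sense_uohm);
	}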
@@ -1087,60 +1095,6 @@
 	return 1;
 }
 
-#define BMS_OVERRIDE_MODE_EN_BIT	BIT(7)
-#define EN_VBAT_BIT			BIT(0)
-#define OVERRIDE_MODE_DELAY_MS		20
-static int override_mode_batt_v_and_i(
-		struct qpnp_bms_chip *chip, int *ibat_ua, int *vbat_uv)
-{
-	int16_t vsense_raw, vbat_raw;
-	int vsense_uv, rc;
-	u8 delay;
-
-	mutex_lock(&chip->bms_output_lock);
-
-	delay = 0x00;
-	rc = qpnp_write_wrapper(chip, &delay,
-			chip->base + BMS1_S1_DELAY_CTL, 1);
-	if (rc)
-		pr_err("unable to write into BMS1_S1_DELAY, rc: %d\n", rc);
-
-	rc = qpnp_masked_write(chip, BMS1_MODE_CTL,
-			BMS_OVERRIDE_MODE_EN_BIT | EN_VBAT_BIT,
-			BMS_OVERRIDE_MODE_EN_BIT | EN_VBAT_BIT);
-	if (rc)
-		pr_err("unable to write into BMS1_MODE_CTL, rc: %d\n", rc);
-
-	msleep(OVERRIDE_MODE_DELAY_MS);
-
-	lock_output_data(chip);
-	qpnp_read_wrapper(chip, (u8 *)&vsense_raw,
-			chip->base + BMS1_VSENSE_AVG_DATA0, 2);
-	qpnp_read_wrapper(chip, (u8 *)&vbat_raw,
-			chip->base + BMS1_VBAT_AVG_DATA0, 2);
-	unlock_output_data(chip);
-
-	rc = qpnp_masked_write(chip, BMS1_MODE_CTL,
-			BMS_OVERRIDE_MODE_EN_BIT | EN_VBAT_BIT, 0);
-
-	delay = 0x0B;
-	rc = qpnp_write_wrapper(chip, &delay,
-			chip->base + BMS1_S1_DELAY_CTL, 1);
-	if (rc)
-		pr_err("unable to write into BMS1_S1_DELAY, rc: %d\n", rc);
-
-	mutex_unlock(&chip->bms_output_lock);
-
-	*vbat_uv = convert_vbatt_raw_to_uv(chip, vbat_raw);
-	vsense_uv = convert_vsense_to_uv(chip, vsense_raw);
-	*ibat_ua = div_s64(vsense_uv * 1000000LL, (int)chip->r_sense_uohm);
-
-	pr_debug("vsense_raw = 0x%x vbat_raw = 0x%x ibat_ua = %d vbat_uv = %d\n",
-			(uint16_t)vsense_raw, (uint16_t)vbat_raw,
-			*ibat_ua, *vbat_uv);
-	return 0;
-}
-
 static bool is_battery_charging(struct qpnp_bms_chip *chip)
 {
 	union power_supply_propval ret = {0,};
@@ -1180,23 +1134,21 @@
 static int get_simultaneous_batt_v_and_i(struct qpnp_bms_chip *chip,
 					int *ibat_ua, int *vbat_uv)
 {
+	struct qpnp_iadc_result i_result;
+	struct qpnp_vadc_result v_result;
+	enum qpnp_iadc_channels iadc_channel;
 	int rc;
 
-	if (is_batfet_open(chip)) {
-		pr_debug("batfet is open using separate vbat and ibat meas\n");
-		rc = get_battery_voltage(vbat_uv);
-		if (rc < 0) {
-			pr_err("adc vbat failed err = %d\n", rc);
-			return rc;
-		}
-		rc = get_battery_current(chip, ibat_ua);
-		if (rc < 0) {
-			pr_err("bms ibat failed err = %d\n", rc);
-			return rc;
-		}
-	} else {
-		return override_mode_batt_v_and_i(chip, ibat_ua, vbat_uv);
+	iadc_channel = chip->use_external_rsense ?
+				EXTERNAL_RSENSE : INTERNAL_RSENSE;
+	rc = qpnp_iadc_vadc_sync_read(iadc_channel, &i_result,
+				VBAT_SNS, &v_result);
+	if (rc) {
+		pr_err("vadc read failed with rc: %d\n", rc);
+		return rc;
 	}
+	*ibat_ua = (int)i_result.result_ua;
+	*vbat_uv = (int)v_result.physical;
 
 	return 0;
 }
@@ -1223,7 +1175,7 @@
 
 static int reset_bms_for_test(struct qpnp_bms_chip *chip)
 {
-	int ibat_ua, vbat_uv, rc;
+	int ibat_ua = 0, vbat_uv = 0, rc;
 	int ocv_est_uv;
 
 	if (!chip) {
@@ -1474,16 +1426,12 @@
 static int clamp_soc_based_on_voltage(struct qpnp_bms_chip *chip, int soc)
 {
 	int rc, vbat_uv;
-	struct qpnp_vadc_result result;
 
-	rc = qpnp_vadc_read(VBAT_SNS, &result);
-	if (rc) {
-		pr_err("error reading vbat_sns adc channel = %d, rc = %d\n",
-						VBAT_SNS, rc);
-		return rc;
+	rc = get_battery_voltage(&vbat_uv);
+	if (rc < 0) {
+		pr_err("adc vbat failed err = %d\n", rc);
+		return soc;
 	}
-
-	vbat_uv = (int)result.physical;
 	if (soc == 0 && vbat_uv > chip->v_cutoff_uv) {
 		pr_debug("clamping soc to 1, vbat (%d) > cutoff (%d)\n",
 						vbat_uv, chip->v_cutoff_uv);
@@ -1612,28 +1560,16 @@
 	return chip->calculated_soc;
 }
 
-static int read_vbat(struct qpnp_bms_chip *chip)
-{
-	int rc;
-	struct qpnp_vadc_result result;
-
-	rc = qpnp_vadc_read(VBAT_SNS, &result);
-	if (rc) {
-		pr_err("error reading vadc VBAT_SNS = %d, rc = %d\n",
-					VBAT_SNS, rc);
-		return rc;
-	}
-	pr_debug("read %duv from vadc\n", (int)result.physical);
-	return (int)result.physical;
-}
-
 static int calculate_soc_from_voltage(struct qpnp_bms_chip *chip)
 {
 	int voltage_range_uv, voltage_remaining_uv, voltage_based_soc;
-	int vbat_uv;
+	int rc, vbat_uv;
 
-	vbat_uv = read_vbat(chip);
-
+	rc = get_battery_voltage(&vbat_uv);
+	if (rc < 0) {
+		pr_err("adc vbat failed err = %d\n", rc);
+		return rc;
+	}
 	voltage_range_uv = chip->max_voltage_uv - chip->v_cutoff_uv;
 	voltage_remaining_uv = vbat_uv - chip->v_cutoff_uv;
 	voltage_based_soc = voltage_remaining_uv * 100 / voltage_range_uv;
@@ -2467,7 +2403,12 @@
 	}
 
 	vbatt = 0;
-	get_battery_voltage(&vbatt);
+	rc = get_battery_voltage(&vbatt);
+	if (rc) {
+		pr_err("error reading vbat_sns adc channel = %d, rc = %d\n",
+						VBAT_SNS, rc);
+		goto unregister_dc;
+	}
 
 	pr_info("probe success: soc =%d vbatt = %d ocv = %d r_sense_uohm = %u\n",
 				get_prop_bms_capacity(chip),
diff --git a/drivers/power/qpnp-charger.c b/drivers/power/qpnp-charger.c
index e2ba042..7833afa 100644
--- a/drivers/power/qpnp-charger.c
+++ b/drivers/power/qpnp-charger.c
@@ -86,8 +86,8 @@
 #define USB_OVP_CTL				0x42
 #define SEC_ACCESS				0xD0
 
-/* SMBB peripheral subtype values */
 #define REG_OFFSET_PERP_SUBTYPE			0x05
+/* SMBB peripheral subtype values */
 #define SMBB_CHGR_SUBTYPE			0x01
 #define SMBB_BUCK_SUBTYPE			0x02
 #define SMBB_BAT_IF_SUBTYPE			0x03
@@ -96,6 +96,14 @@
 #define SMBB_BOOST_SUBTYPE			0x06
 #define SMBB_MISC_SUBTYPE			0x07
 
+/* SMBBP peripheral subtype values */
+#define SMBBP_CHGR_SUBTYPE			0x31
+#define SMBBP_BUCK_SUBTYPE			0x32
+#define SMBBP_BAT_IF_SUBTYPE			0x33
+#define SMBBP_USB_CHGPTH_SUBTYPE		0x34
+#define SMBBP_BOOST_SUBTYPE			0x36
+#define SMBBP_MISC_SUBTYPE			0x37
+
 #define QPNP_CHARGER_DEV_NAME	"qcom,qpnp-charger"
 
 /* Status bits and masks */
@@ -341,6 +349,9 @@
 	u8 dcin_valid_rt_sts;
 	int rc;
 
+	if (!chip->dc_chgpth_base)
+		return 0;
+
 	rc = qpnp_chg_read(chip, &dcin_valid_rt_sts,
 				 INT_RT_STS(chip->dc_chgpth_base), 1);
 	if (rc) {
@@ -1212,6 +1223,7 @@
 
 	switch (subtype) {
 	case SMBB_CHGR_SUBTYPE:
+	case SMBBP_CHGR_SUBTYPE:
 		chip->chg_done_irq = spmi_get_irq_byname(chip->spmi,
 						spmi_resource, "chg-done");
 		if (chip->chg_done_irq < 0) {
@@ -1289,6 +1301,7 @@
 		enable_irq_wake(chip->chg_done_irq);
 		break;
 	case SMBB_BUCK_SUBTYPE:
+	case SMBBP_BUCK_SUBTYPE:
 		rc = qpnp_chg_masked_write(chip,
 			chip->chgr_base + CHGR_BUCK_BCK_VBAT_REG_MODE,
 			BUCK_VBAT_REG_NODE_SEL_BIT,
@@ -1299,8 +1312,10 @@
 		}
 		break;
 	case SMBB_BAT_IF_SUBTYPE:
+	case SMBBP_BAT_IF_SUBTYPE:
 		break;
 	case SMBB_USB_CHGPTH_SUBTYPE:
+	case SMBBP_USB_CHGPTH_SUBTYPE:
 		chip->usbin_valid_irq = spmi_get_irq_byname(chip->spmi,
 						spmi_resource, "usbin-valid");
 		if (chip->usbin_valid_irq < 0) {
@@ -1361,8 +1376,10 @@
 		enable_irq_wake(chip->dcin_valid_irq);
 		break;
 	case SMBB_BOOST_SUBTYPE:
+	case SMBBP_BOOST_SUBTYPE:
 		break;
 	case SMBB_MISC_SUBTYPE:
+	case SMBBP_MISC_SUBTYPE:
 		pr_debug("Setting BOOT_DONE\n");
 		rc = qpnp_chg_masked_write(chip,
 			chip->misc_base + CHGR_MISC_BOOT_DONE,
@@ -1397,10 +1414,6 @@
 		return -ENOMEM;
 	}
 
-	rc = qpnp_vadc_is_ready();
-	if (rc)
-		goto fail_chg_enable;
-
 	chip->dev = &(spmi->dev);
 	chip->spmi = spmi;
 
@@ -1557,6 +1570,7 @@
 
 		switch (subtype) {
 		case SMBB_CHGR_SUBTYPE:
+		case SMBBP_CHGR_SUBTYPE:
 			chip->chgr_base = resource->start;
 			rc = qpnp_chg_hwinit(chip, subtype, spmi_resource);
 			if (rc) {
@@ -1566,6 +1580,7 @@
 			}
 			break;
 		case SMBB_BUCK_SUBTYPE:
+		case SMBBP_BUCK_SUBTYPE:
 			chip->buck_base = resource->start;
 			rc = qpnp_chg_hwinit(chip, subtype, spmi_resource);
 			if (rc) {
@@ -1575,6 +1590,7 @@
 			}
 			break;
 		case SMBB_BAT_IF_SUBTYPE:
+		case SMBBP_BAT_IF_SUBTYPE:
 			chip->bat_if_base = resource->start;
 			rc = qpnp_chg_hwinit(chip, subtype, spmi_resource);
 			if (rc) {
@@ -1584,6 +1600,7 @@
 			}
 			break;
 		case SMBB_USB_CHGPTH_SUBTYPE:
+		case SMBBP_USB_CHGPTH_SUBTYPE:
 			chip->usb_chgpth_base = resource->start;
 			rc = qpnp_chg_hwinit(chip, subtype, spmi_resource);
 			if (rc) {
@@ -1602,6 +1619,7 @@
 			}
 			break;
 		case SMBB_BOOST_SUBTYPE:
+		case SMBBP_BOOST_SUBTYPE:
 			chip->boost_base = resource->start;
 			rc = qpnp_chg_hwinit(chip, subtype, spmi_resource);
 			if (rc) {
@@ -1611,6 +1629,7 @@
 			}
 			break;
 		case SMBB_MISC_SUBTYPE:
+		case SMBBP_MISC_SUBTYPE:
 			chip->misc_base = resource->start;
 			rc = qpnp_chg_hwinit(chip, subtype, spmi_resource);
 			if (rc) {
@@ -1628,34 +1647,44 @@
 	dev_set_drvdata(&spmi->dev, chip);
 	device_init_wakeup(&spmi->dev, 1);
 
-	chip->dc_psy.name = "qpnp-dc";
-	chip->dc_psy.type = POWER_SUPPLY_TYPE_MAINS;
-	chip->dc_psy.supplied_to = pm_power_supplied_to;
-	chip->dc_psy.num_supplicants = ARRAY_SIZE(pm_power_supplied_to);
-	chip->dc_psy.properties = pm_power_props_mains;
-	chip->dc_psy.num_properties = ARRAY_SIZE(pm_power_props_mains);
-	chip->dc_psy.get_property = qpnp_power_get_property_mains;
+	if (chip->bat_if_base) {
+		rc = qpnp_vadc_is_ready();
+		if (rc)
+			goto fail_chg_enable;
 
-	chip->batt_psy.name = "battery";
-	chip->batt_psy.type = POWER_SUPPLY_TYPE_BATTERY;
-	chip->batt_psy.properties = msm_batt_power_props;
-	chip->batt_psy.num_properties = ARRAY_SIZE(msm_batt_power_props);
-	chip->batt_psy.get_property = qpnp_batt_power_get_property;
-	chip->batt_psy.set_property = qpnp_batt_power_set_property;
-	chip->batt_psy.property_is_writeable = qpnp_batt_property_is_writeable;
-	chip->batt_psy.external_power_changed =
+		chip->batt_psy.name = "battery";
+		chip->batt_psy.type = POWER_SUPPLY_TYPE_BATTERY;
+		chip->batt_psy.properties = msm_batt_power_props;
+		chip->batt_psy.num_properties =
+			ARRAY_SIZE(msm_batt_power_props);
+		chip->batt_psy.get_property = qpnp_batt_power_get_property;
+		chip->batt_psy.set_property = qpnp_batt_power_set_property;
+		chip->batt_psy.property_is_writeable =
+				qpnp_batt_property_is_writeable;
+		chip->batt_psy.external_power_changed =
 				qpnp_batt_external_power_changed;
 
-	rc = power_supply_register(chip->dev, &chip->batt_psy);
-	if (rc < 0) {
-		pr_err("power_supply_register batt failed rc = %d\n", rc);
-		goto fail_chg_enable;
+		rc = power_supply_register(chip->dev, &chip->batt_psy);
+		if (rc < 0) {
+			pr_err("batt failed to register rc = %d\n", rc);
+			goto fail_chg_enable;
+		}
 	}
 
-	rc = power_supply_register(chip->dev, &chip->dc_psy);
-	if (rc < 0) {
-		pr_err("power_supply_register usb failed rc = %d\n", rc);
-		goto unregister_batt;
+	if (chip->dc_chgpth_base) {
+		chip->dc_psy.name = "qpnp-dc";
+		chip->dc_psy.type = POWER_SUPPLY_TYPE_MAINS;
+		chip->dc_psy.supplied_to = pm_power_supplied_to;
+		chip->dc_psy.num_supplicants = ARRAY_SIZE(pm_power_supplied_to);
+		chip->dc_psy.properties = pm_power_props_mains;
+		chip->dc_psy.num_properties = ARRAY_SIZE(pm_power_props_mains);
+		chip->dc_psy.get_property = qpnp_power_get_property_mains;
+
+		rc = power_supply_register(chip->dev, &chip->dc_psy);
+		if (rc < 0) {
+			pr_err("power_supply_register dc failed rc=%d\n", rc);
+			goto unregister_batt;
+		}
 	}
 
 	/* Turn on appropriate workaround flags */
@@ -1664,11 +1693,11 @@
 	power_supply_set_present(chip->usb_psy,
 			qpnp_chg_is_usb_chg_plugged_in(chip));
 
-	if (chip->maxinput_dc_ma) {
+	if (chip->maxinput_dc_ma && chip->dc_chgpth_base) {
 		rc = qpnp_chg_idcmax_set(chip, chip->maxinput_dc_ma);
 		if (rc) {
 			pr_err("Error setting idcmax property %d\n", rc);
-			goto fail_chg_enable;
+			goto unregister_batt;
 		}
 	}
 
@@ -1684,7 +1713,8 @@
 	return 0;
 
 unregister_batt:
-	power_supply_unregister(&chip->batt_psy);
+	if (chip->bat_if_base)
+		power_supply_unregister(&chip->batt_psy);
 fail_chg_enable:
 	kfree(chip->thermal_mitigation);
 	kfree(chip);
diff --git a/drivers/slimbus/slim-msm-ctrl.c b/drivers/slimbus/slim-msm-ctrl.c
index 9b0b8b4..9a864aa 100644
--- a/drivers/slimbus/slim-msm-ctrl.c
+++ b/drivers/slimbus/slim-msm-ctrl.c
@@ -263,6 +263,17 @@
 			 */
 			mb();
 			complete(&dev->rx_msgq_notify);
+		} else if (mt == SLIM_MSG_MT_CORE &&
+			mc == SLIM_MSG_MC_REPORT_ABSENT) {
+			writel_relaxed(MGR_INT_RX_MSG_RCVD, dev->base +
+						MGR_INT_CLR);
+			/*
+			 * Guarantee that CLR bit write goes through
+			 * before signalling completion
+			 */
+			mb();
+			complete(&dev->rx_msgq_notify);
+
 		} else if (mc == SLIM_MSG_MC_REPLY_INFORMATION ||
 				mc == SLIM_MSG_MC_REPLY_VALUE) {
 			msm_slim_rx_enqueue(dev, rx_buf, len);
@@ -975,6 +986,10 @@
 			txn.wbuf = wbuf;
 			gen_ack = true;
 			ret = msm_xfer_msg(&dev->ctrl, &txn);
+			break;
+		case SLIM_MSG_MC_REPORT_ABSENT:
+			dev_info(dev->dev, "Received Report Absent Message\n");
+			break;
 		default:
 			break;
 		}
@@ -1087,7 +1102,8 @@
 				laddr = (u8)((buffer[0] >> 16) & 0xff);
 				sat = addr_to_sat(dev, laddr);
 			}
-		} else if ((index * 4) >= msg_len) {
+		}
+		if ((index * 4) >= msg_len) {
 			index = 0;
 			if (sat) {
 				msm_sat_enqueue(sat, buffer, msg_len);
diff --git a/drivers/spmi/qpnp-int.c b/drivers/spmi/qpnp-int.c
index d1d49ef..082c9ff 100644
--- a/drivers/spmi/qpnp-int.c
+++ b/drivers/spmi/qpnp-int.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -26,13 +26,16 @@
 #include <linux/radix-tree.h>
 #include <linux/slab.h>
 #include <linux/printk.h>
+#include <linux/ratelimit.h>
 
 #include <asm/irq.h>
 #include <asm/mach/irq.h>
 #include <mach/qpnp-int.h>
 
 /* 16 slave_ids, 256 per_ids per slave, and 8 ints per per_id */
-#define QPNPINT_NR_IRQS (16 * 256 * 8)
+#define QPNPINT_NR_IRQS		(16 * 256 * 8)
+/* This value is guaranteed not to be valid for private data */
+#define QPNPINT_INVALID_DATA	0x80000000
 
 enum qpnpint_regs {
 	QPNPINT_REG_RT_STS		= 0x10,
@@ -65,7 +68,7 @@
 struct q_chip_data {
 	int bus_nr;
 	struct irq_domain *domain;
-	struct qpnp_local_int cb;
+	struct qpnp_local_int *cb;
 	struct spmi_controller *spmi_ctrl;
 	struct radix_tree_root per_tree;
 	struct list_head list;
@@ -114,6 +117,18 @@
 	return 0;
 }
 
+static int qpnpint_spmi_read(struct q_irq_data *irq_d, uint8_t reg,
+			     void *buf, uint32_t len)
+{
+	struct q_chip_data *chip_d = irq_d->chip_d;
+
+	if (!chip_d->spmi_ctrl)
+		return -ENODEV;
+
+	return spmi_ext_register_readl(chip_d->spmi_ctrl, irq_d->spmi_slave,
+				       irq_d->spmi_offset + reg, buf, len);
+}
+
 static int qpnpint_spmi_write(struct q_irq_data *irq_d, uint8_t reg,
 			      void *buf, uint32_t len)
 {
@@ -128,31 +143,76 @@
 	return rc;
 }
 
+static int qpnpint_arbiter_op(struct irq_data *d,
+			      struct q_irq_data *irq_d,
+			      int (*arb_op)(struct spmi_controller *,
+					    struct qpnp_irq_spec *,
+					    uint32_t))
+
+{
+	struct q_chip_data *chip_d = irq_d->chip_d;
+	struct qpnp_irq_spec q_spec;
+	int rc;
+
+	if (!arb_op)
+		return 0;
+
+	if (!chip_d->cb->register_priv_data) {
+		pr_warn_ratelimited("Arbiter cannot register private data\n");
+		return -ENODEV;
+	}
+
+	rc = qpnpint_decode_hwirq(d->hwirq, &q_spec);
+	if (rc) {
+		pr_err_ratelimited("%s: decode failed on hwirq %lu\n",
+							__func__, d->hwirq);
+		return rc;
+	} else {
+		if (irq_d->priv_d == QPNPINT_INVALID_DATA) {
+			rc = chip_d->cb->register_priv_data(chip_d->spmi_ctrl,
+						&q_spec, &irq_d->priv_d);
+			if (rc) {
+				pr_err_ratelimited(
+					"%s: priv data registration failed on hwirq %lu\n",
+					__func__, d->hwirq);
+				return rc;
+			}
+
+		}
+		arb_op(chip_d->spmi_ctrl, &q_spec, irq_d->priv_d);
+	}
+
+	return 0;
+}
+
 static void qpnpint_irq_mask(struct irq_data *d)
 {
 	struct q_irq_data *irq_d = irq_data_get_irq_chip_data(d);
 	struct q_chip_data *chip_d = irq_d->chip_d;
 	struct q_perip_data *per_d = irq_d->per_d;
-	struct qpnp_irq_spec q_spec;
 	int rc;
 
 	pr_debug("hwirq %lu irq: %d\n", d->hwirq, d->irq);
 
-	if (chip_d->cb.mask) {
-		rc = qpnpint_decode_hwirq(d->hwirq, &q_spec);
-		if (rc)
-			pr_err("decode failed on hwirq %lu\n", d->hwirq);
-		else
-			chip_d->cb.mask(chip_d->spmi_ctrl, &q_spec,
-								irq_d->priv_d);
+	if (!chip_d->cb) {
+		pr_warn_ratelimited("No arbiter on bus=%u slave=%u offset=%u\n",
+				chip_d->bus_nr, irq_d->spmi_slave,
+				irq_d->spmi_offset);
+		return;
 	}
 
+	qpnpint_arbiter_op(d, irq_d, chip_d->cb->mask);
+
 	per_d->int_en &= ~irq_d->mask_shift;
 
 	rc = qpnpint_spmi_write(irq_d, QPNPINT_REG_EN_CLR,
 					(u8 *)&irq_d->mask_shift, 1);
-	if (rc)
-		pr_err("spmi failure on irq %d\n", d->irq);
+	if (rc) {
+		pr_err_ratelimited("spmi failure on irq %d\n", d->irq);
+		return;
+	}
+
+	pr_debug("done hwirq %lu irq: %d\n", d->hwirq, d->irq);
 }
 
 static void qpnpint_irq_mask_ack(struct irq_data *d)
@@ -160,32 +220,34 @@
 	struct q_irq_data *irq_d = irq_data_get_irq_chip_data(d);
 	struct q_chip_data *chip_d = irq_d->chip_d;
 	struct q_perip_data *per_d = irq_d->per_d;
-	struct qpnp_irq_spec q_spec;
 	int rc;
 
-	pr_debug("hwirq %lu irq: %d mask: 0x%x\n", d->hwirq, d->irq,
-							irq_d->mask_shift);
+	pr_debug("hwirq %lu irq: %d\n", d->hwirq, d->irq);
 
-	if (chip_d->cb.mask) {
-		rc = qpnpint_decode_hwirq(d->hwirq, &q_spec);
-		if (rc)
-			pr_err("decode failed on hwirq %lu\n", d->hwirq);
-		else
-			chip_d->cb.mask(chip_d->spmi_ctrl, &q_spec,
-								irq_d->priv_d);
+	if (!chip_d->cb) {
+		pr_warn_ratelimited("No arbiter on bus=%u slave=%u offset=%u\n",
+				chip_d->bus_nr, irq_d->spmi_slave,
+				irq_d->spmi_offset);
+		return;
 	}
 
+	qpnpint_arbiter_op(d, irq_d, chip_d->cb->mask);
+
 	per_d->int_en &= ~irq_d->mask_shift;
 
 	rc = qpnpint_spmi_write(irq_d, QPNPINT_REG_EN_CLR,
 							&irq_d->mask_shift, 1);
-	if (rc)
+	if (rc) {
 		pr_err("spmi failure on irq %d\n", d->irq);
+		return;
+	}
 
 	rc = qpnpint_spmi_write(irq_d, QPNPINT_REG_LATCHED_CLR,
 							&irq_d->mask_shift, 1);
-	if (rc)
+	if (rc) {
 		pr_err("spmi failure on irq %d\n", d->irq);
+		return;
+	}
 }
 
 static void qpnpint_irq_unmask(struct irq_data *d)
@@ -193,25 +255,26 @@
 	struct q_irq_data *irq_d = irq_data_get_irq_chip_data(d);
 	struct q_chip_data *chip_d = irq_d->chip_d;
 	struct q_perip_data *per_d = irq_d->per_d;
-	struct qpnp_irq_spec q_spec;
 	int rc;
 
 	pr_debug("hwirq %lu irq: %d\n", d->hwirq, d->irq);
 
-	if (chip_d->cb.unmask) {
-		rc = qpnpint_decode_hwirq(d->hwirq, &q_spec);
-		if (rc)
-			pr_err("decode failed on hwirq %lu\n", d->hwirq);
-		else
-			chip_d->cb.unmask(chip_d->spmi_ctrl, &q_spec,
-								irq_d->priv_d);
+	if (!chip_d->cb) {
+		pr_warn_ratelimited("No arbiter on bus=%u slave=%u offset=%u\n",
+				chip_d->bus_nr, irq_d->spmi_slave,
+				irq_d->spmi_offset);
+		return;
 	}
 
+	qpnpint_arbiter_op(d, irq_d, chip_d->cb->unmask);
+
 	per_d->int_en |= irq_d->mask_shift;
 	rc = qpnpint_spmi_write(irq_d, QPNPINT_REG_EN_SET,
 					&irq_d->mask_shift, 1);
-	if (rc)
+	if (rc) {
 		pr_err("spmi failure on irq %d\n", d->irq);
+		return;
+	}
 }
 
 static int qpnpint_irq_set_type(struct irq_data *d, unsigned int flow_type)
@@ -248,9 +311,29 @@
 	buf[2] = per_d->pol_low;
 
 	rc = qpnpint_spmi_write(irq_d, QPNPINT_REG_SET_TYPE, &buf, 3);
-	if (rc)
+	if (rc) {
 		pr_err("spmi failure on irq %d\n", d->irq);
-	return rc;
+		return rc;
+	}
+
+	return 0;
+}
+
+static int qpnpint_irq_read_line(struct irq_data *d)
+{
+	struct q_irq_data *irq_d = irq_data_get_irq_chip_data(d);
+	int rc;
+	u8 buf;
+
+	pr_debug("hwirq %lu irq: %d\n", d->hwirq, d->irq);
+
+	rc = qpnpint_spmi_read(irq_d, QPNPINT_REG_RT_STS, &buf, 1);
+	if (rc) {
+		pr_err("spmi failure on irq %d\n", d->irq);
+		return rc;
+	}
+
+	return (buf & irq_d->mask_shift) ? 1 : 0;
 }
 
 static int qpnpint_irq_set_wake(struct irq_data *d, unsigned int on)
@@ -264,6 +347,7 @@
 	.irq_mask_ack	= qpnpint_irq_mask_ack,
 	.irq_unmask	= qpnpint_irq_unmask,
 	.irq_set_type	= qpnpint_irq_set_type,
+	.irq_read_line	= qpnpint_irq_read_line,
 	.irq_set_wake	= qpnpint_irq_set_wake,
 	.flags		= IRQCHIP_MASK_ON_SUSPEND,
 };
@@ -283,11 +367,14 @@
 	irq_d->spmi_offset = q_spec.per << 8;
 	irq_d->chip_d = chip_d;
 
-	if (chip_d->cb.register_priv_data)
-		rc = chip_d->cb.register_priv_data(chip_d->spmi_ctrl, &q_spec,
+	irq_d->priv_d = QPNPINT_INVALID_DATA;
+
+	if (chip_d->cb && chip_d->cb->register_priv_data) {
+		rc = chip_d->cb->register_priv_data(chip_d->spmi_ctrl, &q_spec,
 							&irq_d->priv_d);
 		if (rc)
 			return rc;
+	}
 
 	irq_d->per_d->use_count++;
 	return 0;
@@ -365,6 +452,8 @@
 	*out_hwirq = ret;
 	*out_type = IRQ_TYPE_NONE;
 
+	pr_debug("out_hwirq = %lu\n", *out_hwirq);
+
 	return 0;
 }
 
@@ -386,7 +475,7 @@
 
 	pr_debug("hwirq = %lu\n", hwirq);
 
-	if (hwirq < 0 || hwirq >= 32768) {
+	if (hwirq < 0 || hwirq >= QPNPINT_NR_IRQS) {
 		pr_err("hwirq %lu out of bounds\n", hwirq);
 		return -EINVAL;
 	}
@@ -448,7 +537,10 @@
 
 	list_for_each_entry(chip_d, &qpnpint_chips, list)
 		if (node == chip_d->domain->of_node) {
-			chip_d->cb = *li_cb;
+			chip_d->cb = kmemdup(li_cb,
+						sizeof(*li_cb), GFP_ATOMIC);
+			if (!chip_d->cb)
+				return -ENOMEM;
 			chip_d->spmi_ctrl = ctrl;
 			chip_lookup[ctrl->nr] = chip_d;
 			return 0;
@@ -458,6 +550,27 @@
 }
 EXPORT_SYMBOL(qpnpint_register_controller);
 
+int qpnpint_unregister_controller(struct device_node *node)
+{
+	struct q_chip_data *chip_d;
+
+	if (!node)
+		return -EINVAL;
+
+	list_for_each_entry(chip_d, &qpnpint_chips, list)
+		if (node == chip_d->domain->of_node) {
+			kfree(chip_d->cb);
+			chip_d->cb = NULL;
+			if (chip_d->spmi_ctrl)
+				chip_lookup[chip_d->spmi_ctrl->nr] = NULL;
+			chip_d->spmi_ctrl = NULL;
+			return 0;
+		}
+
+	return -ENOENT;
+}
+EXPORT_SYMBOL(qpnpint_unregister_controller);
+
 int qpnpint_handle_irq(struct spmi_controller *spmi_ctrl,
 		       struct qpnp_irq_spec *spec)
 {
diff --git a/drivers/spmi/spmi-pmic-arb.c b/drivers/spmi/spmi-pmic-arb.c
index 450db0b..05a4806 100644
--- a/drivers/spmi/spmi-pmic-arb.c
+++ b/drivers/spmi/spmi-pmic-arb.c
@@ -763,12 +763,18 @@
 static int __devexit spmi_pmic_arb_remove(struct platform_device *pdev)
 {
 	struct spmi_pmic_arb_dev *pmic_arb = platform_get_drvdata(pdev);
+	int ret;
+
+	ret = qpnpint_unregister_controller(pmic_arb->controller.dev.of_node);
+	if (ret)
+		dev_err(&pdev->dev, "Unable to unregister controller %d\n",
+					pmic_arb->controller.nr);
 
 	if (pmic_arb->allow_wakeup)
 		irq_set_irq_wake(pmic_arb->pic_irq, 0);
 	platform_set_drvdata(pdev, NULL);
 	spmi_del_controller(&pmic_arb->controller);
-	return 0;
+	return ret;
 }
 
 static struct of_device_id spmi_pmic_arb_match_table[] = {
diff --git a/drivers/thermal/msm8974-tsens.c b/drivers/thermal/msm8974-tsens.c
index e37b3c4..482d383 100644
--- a/drivers/thermal/msm8974-tsens.c
+++ b/drivers/thermal/msm8974-tsens.c
@@ -63,7 +63,7 @@
 #define TSENS_SN_REMOTE_CONFIG(n)	((n) + 0x3c)
 
 #define TSENS_EEPROM(n)			((n) + 0xd0)
-#define TSENS_EEPROM_REDUNDANCY_SEL(n)	((n) + 0x1cc)
+#define TSENS_EEPROM_REDUNDANCY_SEL(n)	((n) + 0x444)
 #define TSENS_EEPROM_BACKUP_REGION(n)	((n) + 0x440)
 
 #define TSENS_MAIN_CALIB_ADDR_RANGE	6
diff --git a/drivers/tty/serial/msm_serial_hs.c b/drivers/tty/serial/msm_serial_hs.c
index a27322e..c982587 100644
--- a/drivers/tty/serial/msm_serial_hs.c
+++ b/drivers/tty/serial/msm_serial_hs.c
@@ -2861,7 +2861,7 @@
 
 		if (unlikely(msm_uport->wakeup.irq < 0)) {
 			ret = -ENXIO;
-			goto unmap_memory;
+			goto deregister_bus_client;
 		}
 
 		if (is_blsp_uart(msm_uport)) {
@@ -2878,7 +2878,7 @@
 					IORESOURCE_DMA, "uartdm_channels");
 		if (unlikely(!resource)) {
 			ret =  -ENXIO;
-			goto unmap_memory;
+			goto deregister_bus_client;
 		}
 
 		msm_uport->dma_tx_channel = resource->start;
@@ -2888,7 +2888,7 @@
 					IORESOURCE_DMA, "uartdm_crci");
 		if (unlikely(!resource)) {
 			ret = -ENXIO;
-			goto unmap_memory;
+			goto deregister_bus_client;
 		}
 
 		msm_uport->dma_tx_crci = resource->start;
@@ -2905,7 +2905,7 @@
 	msm_uport->clk = clk_get(&pdev->dev, "core_clk");
 	if (IS_ERR(msm_uport->clk)) {
 		ret = PTR_ERR(msm_uport->clk);
-		goto unmap_memory;
+		goto deregister_bus_client;
 	}
 
 	msm_uport->pclk = clk_get(&pdev->dev, "iface_clk");
@@ -2919,7 +2919,7 @@
 	ret = clk_set_rate(msm_uport->clk, uport->uartclk);
 	if (ret) {
 		printk(KERN_WARNING "Error setting clock rate on UART\n");
-		goto unmap_memory;
+		goto put_clk;
 	}
 
 	msm_uport->hsuart_wq = alloc_workqueue("k_hsuart",
@@ -2928,7 +2928,7 @@
 		pr_err("%s(): Unable to create workqueue hsuart_wq\n",
 								__func__);
 		ret =  -ENOMEM;
-		goto unmap_memory;
+		goto put_clk;
 	}
 
 	INIT_WORK(&msm_uport->clock_off_w, hsuart_clock_off_work);
@@ -2946,7 +2946,7 @@
 		ret = msm_hs_sps_init(msm_uport);
 		if (unlikely(ret)) {
 			pr_err("SPS Initialization failed ! err=%d", ret);
-			goto workqueue_destroy;
+			goto destroy_mutex;
 		}
 	}
 
@@ -2989,7 +2989,6 @@
 		uport->line = pdata->userid;
 	ret = uart_add_one_port(&msm_hs_driver, uport);
 	if (!ret) {
-
 		msm_hs_bus_voting(msm_uport, BUS_RESET);
 		clk_disable_unprepare(msm_uport->clk);
 		if (msm_uport->pclk)
@@ -3003,8 +3002,21 @@
 	clk_disable_unprepare(msm_uport->clk);
 	if (msm_uport->pclk)
 		clk_disable_unprepare(msm_uport->pclk);
-workqueue_destroy:
+
+destroy_mutex:
+	mutex_destroy(&msm_uport->clk_mutex);
 	destroy_workqueue(msm_uport->hsuart_wq);
+
+put_clk:
+	if (msm_uport->pclk)
+		clk_put(msm_uport->pclk);
+
+	if (msm_uport->clk)
+		clk_put(msm_uport->clk);
+
+deregister_bus_client:
+	if (is_blsp_uart(msm_uport))
+		msm_bus_scale_unregister_client(msm_uport->bus_perf_client);
 unmap_memory:
 	iounmap(uport->membase);
 	if (is_blsp_uart(msm_uport))
diff --git a/drivers/tty/serial/msm_serial_hs_lite.c b/drivers/tty/serial/msm_serial_hs_lite.c
index c9f4199..8069b35 100644
--- a/drivers/tty/serial/msm_serial_hs_lite.c
+++ b/drivers/tty/serial/msm_serial_hs_lite.c
@@ -2,7 +2,7 @@
  * drivers/serial/msm_serial.c - driver for msm7k serial device and console
  *
  * Copyright (C) 2007 Google, Inc.
- * Copyright (c) 2010-2012, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2010-2013, The Linux Foundation. All rights reserved.
  *
  * This software is licensed under the terms of the GNU General Public
  * License version 2, as published by the Free Software Foundation, and
@@ -1257,6 +1257,9 @@
 {
 	int ret;
 	struct msm_hsl_port *msm_hsl_port = UART_TO_MSM(port);
+	struct platform_device *pdev = to_platform_device(port->dev);
+	const struct msm_serial_hslite_platform_data *pdata =
+					pdev->dev.platform_data;
 
 	switch (state) {
 	case 0:
@@ -1268,9 +1271,11 @@
 		break;
 	case 3:
 		clk_en(port, 0);
-		ret = clk_set_rate(msm_hsl_port->clk, 0);
-		if (ret)
-			pr_err("Error setting UART clock rate to zero.\n");
+		if (pdata && pdata->set_uart_clk_zero) {
+			ret = clk_set_rate(msm_hsl_port->clk, 0);
+			if (ret)
+				pr_err("Error setting UART clock rate to zero.\n");
+		}
 		break;
 	default:
 		pr_err("Unknown PM state %d\n", state);
diff --git a/drivers/usb/dwc3/dwc3-msm.c b/drivers/usb/dwc3/dwc3-msm.c
index beba33f..f38de0c 100644
--- a/drivers/usb/dwc3/dwc3-msm.c
+++ b/drivers/usb/dwc3/dwc3-msm.c
@@ -1935,6 +1935,9 @@
 	case POWER_SUPPLY_PROP_ONLINE:
 		val->intval = mdwc->online;
 		break;
+	case POWER_SUPPLY_PROP_TYPE:
+		val->intval = psy->type;
+		break;
 	default:
 		return -EINVAL;
 	}
@@ -1973,6 +1976,9 @@
 	case POWER_SUPPLY_PROP_CURRENT_MAX:
 		mdwc->current_max = val->intval;
 		break;
+	case POWER_SUPPLY_PROP_TYPE:
+		psy->type = val->intval;
+		break;
 	default:
 		return -EINVAL;
 	}
diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
index 0664376..5694999 100644
--- a/drivers/usb/dwc3/gadget.c
+++ b/drivers/usb/dwc3/gadget.c
@@ -191,7 +191,8 @@
 	 * FIXME For now we will only allocate 1 wMaxPacketSize space
 	 * for each enabled endpoint, later patches will come to
 	 * improve this algorithm so that we better use the internal
-	 * FIFO space
+	 * FIFO space. Also consider the case where TxFIFO RAM space
+	 * may change dynamically based on the USB configuration.
 	 */
 	for (num = 0; num < DWC3_ENDPOINTS_NUM; num++) {
 		struct dwc3_ep	*dep = dwc->eps[num];
@@ -205,7 +206,8 @@
 		if (!(dep->flags & DWC3_EP_ENABLED))
 			continue;
 
-		if (usb_endpoint_xfer_bulk(dep->endpoint.desc)
+		if (((dep->endpoint.maxburst > 1) &&
+				usb_endpoint_xfer_bulk(dep->endpoint.desc))
 				|| usb_endpoint_xfer_isoc(dep->endpoint.desc))
 			mult = 3;
 
@@ -215,8 +217,8 @@
 		 * Make sure that's true somehow and change FIFO allocation
 		 * accordingly.
 		 *
-		 * If we have Bulk or Isochronous endpoints, we want
-		 * them to be able to be very, very fast. So we're giving
+		 * If we have Bulk (burst only) or Isochronous endpoints, we
+		 * want them to be able to be very, very fast. So we're giving
 		 * those endpoints a fifo_size which is enough for 3 full
 		 * packets
 		 */
diff --git a/drivers/usb/gadget/android.c b/drivers/usb/gadget/android.c
index 76d75ea..9dd9c40 100644
--- a/drivers/usb/gadget/android.c
+++ b/drivers/usb/gadget/android.c
@@ -53,7 +53,9 @@
 #include "f_rmnet_sdio.c"
 #include "f_rmnet_smd_sdio.c"
 #include "f_rmnet.c"
+#ifdef CONFIG_SND_PCM
 #include "f_audio_source.c"
+#endif
 #include "f_mass_storage.c"
 #include "u_serial.c"
 #include "u_sdio.c"
@@ -74,10 +76,10 @@
 #define USB_ETH_RNDIS y
 #include "f_rndis.c"
 #include "rndis.c"
+#include "f_qc_ecm.c"
 #include "u_bam_data.c"
 #include "f_mbim.c"
 #include "f_ecm.c"
-#include "f_qc_ecm.c"
 #include "f_qc_rndis.c"
 #include "u_ether.c"
 #include "u_qc_ether.c"
@@ -653,6 +655,9 @@
 	.attributes	= rmnet_function_attributes,
 };
 
+/* ecm transport string */
+static char ecm_transports[MAX_XPORT_STR_LEN];
+
 struct ecm_function_config {
 	u8      ethaddr[ETH_ALEN];
 };
@@ -676,6 +681,7 @@
 					struct usb_configuration *c)
 {
 	int ret;
+	char *trans;
 	struct ecm_function_config *ecm = f->config;
 
 	if (!ecm) {
@@ -687,19 +693,28 @@
 		ecm->ethaddr[0], ecm->ethaddr[1], ecm->ethaddr[2],
 		ecm->ethaddr[3], ecm->ethaddr[4], ecm->ethaddr[5]);
 
-	ret = gether_qc_setup_name(c->cdev->gadget, ecm->ethaddr, "ecm");
-	if (ret) {
-		pr_err("%s: gether_setup failed\n", __func__);
-		return ret;
+	pr_debug("%s: ecm_transport is %s\n", __func__, ecm_transports);
+
+	trans = strim(ecm_transports);
+	if (strcmp("BAM2BAM_IPA", trans)) {
+		ret = gether_qc_setup_name(c->cdev->gadget,
+						ecm->ethaddr, "ecm");
+		if (ret) {
+			pr_err("%s: gether_setup failed\n", __func__);
+			return ret;
+		}
 	}
 
-	return ecm_qc_bind_config(c, ecm->ethaddr);
+	return ecm_qc_bind_config(c, ecm->ethaddr, trans);
 }
 
 static void ecm_qc_function_unbind_config(struct android_usb_function *f,
 						struct usb_configuration *c)
 {
-	gether_qc_cleanup_name("ecm0");
+	char *trans = strim(ecm_transports);
+
+	if (strcmp("BAM2BAM_IPA", trans))
+		gether_qc_cleanup_name("ecm0");
 }
 
 static ssize_t ecm_ethaddr_show(struct device *dev,
@@ -729,7 +744,24 @@
 static DEVICE_ATTR(ecm_ethaddr, S_IRUGO | S_IWUSR, ecm_ethaddr_show,
 					       ecm_ethaddr_store);
 
+static ssize_t ecm_transports_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	return snprintf(buf, PAGE_SIZE, "%s\n", ecm_transports);
+}
+
+static ssize_t ecm_transports_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t size)
+{
+	strlcpy(ecm_transports, buf, sizeof(ecm_transports));
+	return size;
+}
+
+static DEVICE_ATTR(ecm_transports, S_IRUGO | S_IWUSR, ecm_transports_show,
+					       ecm_transports_store);
+
 static struct device_attribute *ecm_function_attributes[] = {
+	&dev_attr_ecm_transports,
 	&dev_attr_ecm_ethaddr,
 	NULL
 };
@@ -1591,6 +1623,7 @@
 	.ctrlrequest	= accessory_function_ctrlrequest,
 };
 
+#ifdef CONFIG_SND_PCM
 static int audio_source_function_init(struct android_usb_function *f,
 			struct usb_composite_dev *cdev)
 {
@@ -1652,6 +1685,7 @@
 	.unbind_config	= audio_source_function_unbind_config,
 	.attributes	= audio_source_function_attributes,
 };
+#endif
 
 static int android_uasp_connect_cb(bool connect)
 {
@@ -1722,7 +1756,9 @@
 	&ecm_function,
 	&mass_storage_function,
 	&accessory_function,
+#ifdef CONFIG_SND_PCM
 	&audio_source_function,
+#endif
 	&uasp_function,
 	NULL
 };
@@ -2045,6 +2081,7 @@
 	struct android_configuration *conf;
 	int enabled = 0;
 	bool audio_enabled = false;
+	static DEFINE_RATELIMIT_STATE(rl, 10*HZ, 1);
 
 	if (!cdev)
 		return -ENODEV;
@@ -2090,7 +2127,7 @@
 					f_holder->f->disable(f_holder->f);
 			}
 		dev->enabled = false;
-	} else {
+	} else if (__ratelimit(&rl)) {
 		pr_err("android_usb: already %s\n",
 				dev->enabled ? "enabled" : "disabled");
 	}
diff --git a/drivers/usb/gadget/f_adb.c b/drivers/usb/gadget/f_adb.c
index a55f0e5..ff2287e 100644
--- a/drivers/usb/gadget/f_adb.c
+++ b/drivers/usb/gadget/f_adb.c
@@ -463,7 +463,10 @@
 
 static int adb_open(struct inode *ip, struct file *fp)
 {
-	pr_info("adb_open\n");
+	static DEFINE_RATELIMIT_STATE(rl, 10*HZ, 1);
+
+	if (__ratelimit(&rl))
+		pr_info("adb_open\n");
 	if (!_adb_dev)
 		return -ENODEV;
 
@@ -486,7 +489,10 @@
 
 static int adb_release(struct inode *ip, struct file *fp)
 {
-	pr_info("adb_release\n");
+	static DEFINE_RATELIMIT_STATE(rl, 10*HZ, 1);
+
+	if (__ratelimit(&rl))
+		pr_info("adb_release\n");
 
 	/*
 	 * ADB daemon closes the device file after I/O error.  The
diff --git a/drivers/usb/gadget/f_mbim.c b/drivers/usb/gadget/f_mbim.c
index d69e850..a32dd15 100644
--- a/drivers/usb/gadget/f_mbim.c
+++ b/drivers/usb/gadget/f_mbim.c
@@ -295,6 +295,7 @@
 	/* MBIM control descriptors */
 	(struct usb_descriptor_header *) &mbim_control_intf,
 	(struct usb_descriptor_header *) &mbim_header_desc,
+	(struct usb_descriptor_header *) &mbim_union_desc,
 	(struct usb_descriptor_header *) &mbb_desc,
 	(struct usb_descriptor_header *) &ext_mbb_desc,
 	(struct usb_descriptor_header *) &fs_mbim_notify_desc,
@@ -664,7 +665,8 @@
 
 	pr_info("dev:%p portno:%d\n", dev, dev->port_num);
 
-	ret = bam_data_connect(&dev->bam_port, dev->port_num, dev->port_num);
+	ret = bam_data_connect(&dev->bam_port, dev->port_num,
+		USB_GADGET_XPORT_BAM2BAM, dev->port_num, USB_FUNC_MBIM);
 	if (ret) {
 		pr_err("bam_data_setup failed: err:%d\n",
 				ret);
diff --git a/drivers/usb/gadget/f_qc_ecm.c b/drivers/usb/gadget/f_qc_ecm.c
index 88d19f5..559fd04 100644
--- a/drivers/usb/gadget/f_qc_ecm.c
+++ b/drivers/usb/gadget/f_qc_ecm.c
@@ -3,7 +3,7 @@
  *
  * Copyright (C) 2003-2005,2008 David Brownell
  * Copyright (C) 2008 Nokia Corporation
- * Copyright (c) 2012, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -21,6 +21,11 @@
 
 /* #define VERBOSE_DEBUG */
 
+#ifdef pr_fmt
+#undef pr_fmt
+#endif
+#define pr_fmt(fmt) "%s: " fmt, __func__
+
 #include <linux/slab.h>
 #include <linux/kernel.h>
 #include <linux/device.h>
@@ -29,6 +34,9 @@
 #include "u_ether.h"
 #include "u_qc_ether.h"
 
+#include "u_bam_data.h"
+#include <mach/ecm_ipa.h>
+
 
 /*
  * This function is a "CDC Ethernet Networking Control Model" (CDC ECM)
@@ -58,9 +66,9 @@
 };
 
 struct f_ecm_qc {
-	struct qc_gether			port;
+	struct qc_gether		port;
 	u8				ctrl_id, data_id;
-
+	enum transport_type		xport;
 	char				ethaddr[14];
 
 	struct usb_ep			*notify;
@@ -69,6 +77,16 @@
 	bool				is_open;
 };
 
+struct f_ecm_qc_ipa_params {
+	u8			dev_mac[ETH_ALEN];
+	u8			host_mac[ETH_ALEN];
+	ecm_ipa_callback	ipa_rx_cb;
+	ecm_ipa_callback	ipa_tx_cb;
+	void			*ipa_priv;
+};
+
+static struct f_ecm_qc_ipa_params ipa_params;
+
 static inline struct f_ecm_qc *func_to_ecm_qc(struct usb_function *f)
 {
 	return container_of(f, struct f_ecm_qc, port.func);
@@ -288,51 +306,6 @@
 
 static struct data_port ecm_qc_bam_port;
 
-static int ecm_qc_bam_setup(void)
-{
-	int ret;
-
-	ret = bam_data_setup(ECM_QC_NO_PORTS);
-	if (ret) {
-		pr_err("bam_data_setup failed err: %d\n", ret);
-		return ret;
-	}
-
-	return 0;
-}
-
-static int ecm_qc_bam_connect(struct f_ecm_qc *dev)
-{
-	int ret;
-
-	ecm_qc_bam_port.cdev = dev->port.func.config->cdev;
-	ecm_qc_bam_port.in = dev->port.in_ep;
-	ecm_qc_bam_port.out = dev->port.out_ep;
-
-	/* currently we use the first connection */
-	ret = bam_data_connect(&ecm_qc_bam_port, 0, 0);
-	if (ret) {
-		pr_err("bam_data_connect failed: err:%d\n",
-				ret);
-		return ret;
-	} else {
-		pr_info("ecm bam connected\n");
-	}
-
-	return 0;
-}
-
-static int ecm_qc_bam_disconnect(struct f_ecm_qc *dev)
-{
-	pr_debug("dev:%p. %s Disconnect BAM.\n", dev, __func__);
-
-	bam_data_disconnect(&ecm_qc_bam_port, 0);
-
-	return 0;
-}
-
-/*-------------------------------------------------------------------------*/
-
 static void ecm_qc_do_notify(struct f_ecm_qc *ecm)
 {
 	struct usb_request		*req = ecm->notify_req;
@@ -401,6 +374,73 @@
 	ecm_qc_do_notify(ecm);
 }
 
+static int ecm_qc_bam_setup(void)
+{
+	int ret;
+
+	ret = bam_data_setup(ECM_QC_NO_PORTS);
+	if (ret) {
+		pr_err("bam_data_setup failed err: %d\n", ret);
+		return ret;
+	}
+
+	return 0;
+}
+
+static int ecm_qc_bam_connect(struct f_ecm_qc *dev)
+{
+	int ret;
+
+	ecm_qc_bam_port.cdev = dev->port.func.config->cdev;
+	ecm_qc_bam_port.in = dev->port.in_ep;
+	ecm_qc_bam_port.out = dev->port.out_ep;
+
+	/* currently we use the first connection */
+	ret = bam_data_connect(&ecm_qc_bam_port, 0, dev->xport,
+					0, USB_FUNC_ECM);
+	if (ret) {
+		pr_err("bam_data_connect failed: err:%d\n", ret);
+		return ret;
+	} else {
+		pr_debug("ecm bam connected\n");
+	}
+
+	dev->is_open = true;
+	ecm_qc_notify(dev);
+
+	return 0;
+}
+
+static int ecm_qc_bam_disconnect(struct f_ecm_qc *dev)
+{
+	pr_debug("dev:%p. Disconnect BAM.\n", dev);
+
+	bam_data_disconnect(&ecm_qc_bam_port, 0);
+
+	ecm_ipa_cleanup(ipa_params.ipa_priv);
+
+	return 0;
+}
+
+void *ecm_qc_get_ipa_rx_cb(void)
+{
+	return ipa_params.ipa_rx_cb;
+}
+
+void *ecm_qc_get_ipa_tx_cb(void)
+{
+	return ipa_params.ipa_tx_cb;
+}
+
+void *ecm_qc_get_ipa_priv(void)
+{
+	return ipa_params.ipa_priv;
+}
+
+/*-------------------------------------------------------------------------*/
+
+
+
 static void ecm_qc_notify_complete(struct usb_ep *ep, struct usb_request *req)
 {
 	struct f_ecm_qc			*ecm = req->context;
@@ -524,7 +564,8 @@
 			 * we can disconnect the port from the network layer.
 			 */
 			ecm_qc_bam_disconnect(ecm);
-			gether_qc_disconnect_name(&ecm->port, "ecm0");
+			if (ecm->xport != USB_GADGET_XPORT_BAM2BAM_IPA)
+				gether_qc_disconnect_name(&ecm->port, "ecm0");
 		}
 
 		if (!ecm->port.in_ep->desc ||
@@ -553,9 +594,12 @@
 				);
 			ecm->port.cdc_filter = DEFAULT_FILTER;
 			DBG(cdev, "activate ecm\n");
-			net = gether_qc_connect_name(&ecm->port, "ecm0");
-			if (IS_ERR(net))
-				return PTR_ERR(net);
+			if (ecm->xport != USB_GADGET_XPORT_BAM2BAM_IPA) {
+				net = gether_qc_connect_name(&ecm->port,
+								"ecm0");
+				if (IS_ERR(net))
+					return PTR_ERR(net);
+			}
 
 			if (ecm_qc_bam_connect(ecm))
 				goto fail;
@@ -597,7 +641,8 @@
 
 	if (ecm->port.in_ep->driver_data) {
 		ecm_qc_bam_disconnect(ecm);
-		gether_qc_disconnect_name(&ecm->port, "ecm0");
+		if (ecm->xport != USB_GADGET_XPORT_BAM2BAM_IPA)
+			gether_qc_disconnect_name(&ecm->port, "ecm0");
 	}
 
 	if (ecm->notify->driver_data) {
@@ -662,6 +707,7 @@
 	status = usb_interface_id(c, f);
 	if (status < 0)
 		goto fail;
+
 	ecm->ctrl_id = status;
 
 	ecm_qc_control_intf.bInterfaceNumber = status;
@@ -670,6 +716,7 @@
 	status = usb_interface_id(c, f);
 	if (status < 0)
 		goto fail;
+
 	ecm->data_id = status;
 
 	ecm_qc_data_nop_intf.bInterfaceNumber = status;
@@ -797,6 +844,7 @@
  * @c: the configuration to support the network link
  * @ethaddr: a buffer in which the ethernet address of the host side
  *	side of the link was recorded
+ * @xport_name: data path transport type name ("BAM2BAM" or "BAM2BAM_IPA")
  * Context: single threaded during gadget setup
  *
  * Returns zero on success, else negative errno.
@@ -805,7 +853,8 @@
  * for calling @gether_cleanup() before module unload.
  */
 int
-ecm_qc_bind_config(struct usb_configuration *c, u8 ethaddr[ETH_ALEN])
+ecm_qc_bind_config(struct usb_configuration *c, u8 ethaddr[ETH_ALEN],
+	char *xport_name)
 {
 	struct f_ecm_qc		*ecm;
 	int		status;
@@ -819,6 +868,8 @@
 		return status;
 	}
 
+	pr_debug("data transport type is %s", xport_name);
+
 	/* maybe allocate device-global string IDs */
 	if (ecm_qc_string_defs[0].id == 0) {
 
@@ -849,11 +900,23 @@
 	if (!ecm)
 		return -ENOMEM;
 
+	ecm->xport = str_to_xport(xport_name);
+	pr_debug("set xport = %d", ecm->xport);
+
 	/* export host's Ethernet address in CDC format */
-	snprintf(ecm->ethaddr, sizeof ecm->ethaddr,
+	if (ecm->xport == USB_GADGET_XPORT_BAM2BAM_IPA) {
+		gether_qc_get_macs(ipa_params.dev_mac, ipa_params.host_mac);
+		snprintf(ecm->ethaddr, sizeof ecm->ethaddr,
+		"%02X%02X%02X%02X%02X%02X",
+		ipa_params.host_mac[0], ipa_params.host_mac[1],
+		ipa_params.host_mac[2], ipa_params.host_mac[3],
+		ipa_params.host_mac[4], ipa_params.host_mac[5]);
+	} else
+		snprintf(ecm->ethaddr, sizeof ecm->ethaddr,
 		"%02X%02X%02X%02X%02X%02X",
 		ethaddr[0], ethaddr[1], ethaddr[2],
 		ethaddr[3], ethaddr[4], ethaddr[5]);
+
 	ecm_qc_string_defs[1].s = ecm->ethaddr;
 
 	ecm->port.cdc_filter = DEFAULT_FILTER;
@@ -870,8 +933,31 @@
 
 	status = usb_add_function(c, &ecm->port.func);
 	if (status) {
+		pr_err("failed to add function");
+		ecm_qc_string_defs[1].s = NULL;
+		kfree(ecm);
+		return status;
+	}
+
+	if (ecm->xport != USB_GADGET_XPORT_BAM2BAM_IPA)
+		return status;
+
+	status = ecm_ipa_init(&ipa_params.ipa_rx_cb, &ipa_params.ipa_tx_cb,
+			&ipa_params.ipa_priv);
+	if (status) {
+		pr_err("failed to initialize ECM IPA Driver");
+		ecm_qc_string_defs[1].s = NULL;
+		kfree(ecm);
+		return status;
+	}
+
+	status = ecm_ipa_configure(ipa_params.host_mac, ipa_params.dev_mac,
+			ipa_params.ipa_priv);
+	if (status) {
+		pr_err("failed to configure ECM IPA Driver");
 		ecm_qc_string_defs[1].s = NULL;
 		kfree(ecm);
 	}
+
 	return status;
 }
diff --git a/drivers/usb/gadget/f_qc_rndis.c b/drivers/usb/gadget/f_qc_rndis.c
index 128b6d1..51d7bc1 100644
--- a/drivers/usb/gadget/f_qc_rndis.c
+++ b/drivers/usb/gadget/f_qc_rndis.c
@@ -6,7 +6,7 @@
  * Copyright (C) 2008 Nokia Corporation
  * Copyright (C) 2009 Samsung Electronics
  *			Author: Michal Nazarewicz (mina86@mina86.com)
- * Copyright (c) 2012, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2
@@ -427,7 +427,8 @@
 	dev->bam_port.out = dev->port.out_ep;
 
 	/* currently we use the first connection */
-	ret = bam_data_connect(&dev->bam_port, 0, 0);
+	ret = bam_data_connect(&dev->bam_port, 0, USB_GADGET_XPORT_BAM2BAM,
+			0, USB_FUNC_RNDIS);
 	if (ret) {
 		pr_err("bam_data_connect failed: err:%d\n",
 				ret);
diff --git a/drivers/usb/gadget/u_bam_data.c b/drivers/usb/gadget/u_bam_data.c
index 70c71d4..8df06a4 100644
--- a/drivers/usb/gadget/u_bam_data.c
+++ b/drivers/usb/gadget/u_bam_data.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -22,9 +22,10 @@
 #include <linux/usb/gadget.h>
 
 #include <mach/bam_dmux.h>
-#include <mach/usb_gadget_xport.h>
 #include <mach/usb_bam.h>
 
+#include "u_bam_data.h"
+
 #define BAM2BAM_DATA_N_PORTS	1
 
 static struct workqueue_struct *bam_data_wq;
@@ -34,12 +35,6 @@
 #define SPS_PARAMS_TBE		        BIT(6)
 #define MSM_VENDOR_ID			BIT(16)
 
-struct data_port {
-	struct usb_composite_dev	*cdev;
-	struct usb_ep			*in;
-	struct usb_ep			*out;
-};
-
 struct bam_data_ch_info {
 	unsigned long		flags;
 	unsigned		id;
@@ -53,6 +48,10 @@
 	u32			src_pipe_idx;
 	u32			dst_pipe_idx;
 	u8			connection_idx;
+
+	enum function_type			func_type;
+	enum transport_type			trans;
+	struct usb_bam_connect_ipa_params	ipa_params;
 };
 
 struct bam_data_port {
@@ -175,6 +174,22 @@
 	return 0;
 }
 
+static void bam2bam_data_disconnect_work(struct work_struct *w)
+{
+	struct bam_data_port *port =
+			container_of(w, struct bam_data_port, disconnect_w);
+	struct bam_data_ch_info *d = &port->data_ch;
+	int ret;
+
+	if (d->trans == USB_GADGET_XPORT_BAM2BAM_IPA) {
+		if (d->func_type == USB_FUNC_ECM)
+			ecm_ipa_disconnect(d->ipa_params.priv);
+		ret = usb_bam_disconnect_ipa(d->connection_idx, &d->ipa_params);
+		if (ret)
+			pr_err("usb_bam_disconnect_ipa failed: err:%d\n", ret);
+	}
+}
+
 static void bam2bam_data_connect_work(struct work_struct *w)
 {
 	struct bam_data_port *port = container_of(w, struct bam_data_port,
@@ -185,14 +200,49 @@
 
 	pr_debug("%s: Connect workqueue started", __func__);
 
-	ret = usb_bam_connect(d->connection_idx, &d->src_pipe_idx,
-						  &d->dst_pipe_idx);
-	d->src_pipe_idx = 11;
-	d->dst_pipe_idx = 10;
+	if (d->trans == USB_GADGET_XPORT_BAM2BAM_IPA) {
+		d->ipa_params.client = IPA_CLIENT_USB_CONS;
+		d->ipa_params.dir = PEER_PERIPHERAL_TO_USB;
+		if (d->func_type == USB_FUNC_ECM) {
+			d->ipa_params.notify = ecm_qc_get_ipa_tx_cb();
+			d->ipa_params.priv = ecm_qc_get_ipa_priv();
+		}
+		ret = usb_bam_connect_ipa(&d->ipa_params);
+		if (ret) {
+			pr_err("%s: usb_bam_connect_ipa failed: err:%d\n",
+				__func__, ret);
+			return;
+		}
 
-	if (ret) {
-		pr_err("usb_bam_connect failed: err:%d\n", ret);
-		return;
+		d->ipa_params.client = IPA_CLIENT_USB_PROD;
+		d->ipa_params.dir = USB_TO_PEER_PERIPHERAL;
+		if (d->func_type == USB_FUNC_ECM) {
+			d->ipa_params.notify = ecm_qc_get_ipa_rx_cb();
+			d->ipa_params.priv = ecm_qc_get_ipa_priv();
+		}
+		ret = usb_bam_connect_ipa(&d->ipa_params);
+		if (ret) {
+			pr_err("%s: usb_bam_connect_ipa failed: err:%d\n",
+				__func__, ret);
+			return;
+		}
+		if (d->func_type == USB_FUNC_ECM) {
+			ret = ecm_ipa_connect(d->ipa_params.cons_clnt_hdl,
+				d->ipa_params.prod_clnt_hdl,
+				d->ipa_params.priv);
+			if (ret) {
+				pr_err("%s: failed to connect IPA: err:%d\n",
+					__func__, ret);
+				return;
+			}
+		}
+	} else { /* transport type is USB_GADGET_XPORT_BAM2BAM */
+		ret = usb_bam_connect(d->connection_idx, &d->src_pipe_idx,
+						  &d->dst_pipe_idx);
+		if (ret) {
+			pr_err("usb_bam_connect failed: err:%d\n", ret);
+			return;
+		}
 	}
 
 	if (!port->port_usb) {
@@ -230,15 +280,17 @@
 	bam_data_start_endless_rx(port);
 	bam_data_start_endless_tx(port);
 
-	/* Register for peer reset callback */
-	usb_bam_register_peer_reset_cb(d->connection_idx,
+	/* Register for peer reset callback if USB_GADGET_XPORT_BAM2BAM */
+	if (d->trans != USB_GADGET_XPORT_BAM2BAM_IPA) {
+		usb_bam_register_peer_reset_cb(d->connection_idx,
 			bam_data_peer_reset_cb, port);
 
-	ret = usb_bam_client_ready(true);
-	if (ret) {
-		pr_err("%s: usb_bam_client_ready failed: err:%d\n",
+		ret = usb_bam_client_ready(true);
+		if (ret) {
+			pr_err("%s: usb_bam_client_ready failed: err:%d\n",
 			__func__, ret);
-		return;
+			return;
+		}
 	}
 
 	pr_debug("%s: Connect workqueue done", __func__);
@@ -262,6 +314,7 @@
 	port->port_num = portno;
 
 	INIT_WORK(&port->connect_w, bam2bam_data_connect_work);
+	INIT_WORK(&port->disconnect_w, bam2bam_data_disconnect_work);
 
 	/* data ch */
 	d = &port->data_ch;
@@ -276,6 +329,7 @@
 void bam_data_disconnect(struct data_port *gr, u8 port_num)
 {
 	struct bam_data_port	*port;
+	struct bam_data_ch_info	*d;
 
 	pr_debug("dev:%p port#%d\n", gr, port_num);
 
@@ -285,7 +339,7 @@
 	}
 
 	if (!gr) {
-		pr_err("mbim data port is null\n");
+		pr_err("data port is null\n");
 		return;
 	}
 
@@ -303,12 +357,19 @@
 		port->port_usb = 0;
 	}
 
-	if (usb_bam_client_ready(false))
-		pr_err("%s: usb_bam_client_ready failed\n", __func__);
+	d = &port->data_ch;
+	if (d->trans == USB_GADGET_XPORT_BAM2BAM_IPA)
+		queue_work(gbam_wq, &port->disconnect_w);
+	else {
+		if (usb_bam_client_ready(false)) {
+			pr_err("%s: usb_bam_client_ready failed\n",
+				__func__);
+		}
+	}
 }
 
 int bam_data_connect(struct data_port *gr, u8 port_num,
-				 u8 connection_idx)
+	enum transport_type trans, u8 connection_idx, enum function_type func)
 {
 	struct bam_data_port	*port;
 	struct bam_data_ch_info	*d;
@@ -322,7 +383,7 @@
 	}
 
 	if (!gr) {
-		pr_err("mbim data port is null\n");
+		pr_err("data port is null\n");
 		return -ENODEV;
 	}
 
@@ -349,6 +410,16 @@
 
 	d->connection_idx = connection_idx;
 
+	d->trans = trans;
+
+	if (trans == USB_GADGET_XPORT_BAM2BAM_IPA) {
+		d->ipa_params.src_pipe = &(d->src_pipe_idx);
+		d->ipa_params.dst_pipe = &(d->dst_pipe_idx);
+		d->ipa_params.idx = connection_idx;
+	}
+
+	d->func_type = func;
+
 	queue_work(bam_data_wq, &port->connect_w);
 
 	return 0;
diff --git a/drivers/usb/gadget/u_bam_data.h b/drivers/usb/gadget/u_bam_data.h
new file mode 100644
index 0000000..71a01b9
--- /dev/null
+++ b/drivers/usb/gadget/u_bam_data.h
@@ -0,0 +1,41 @@
+/* Copyright (c) 2013, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __U_BAM_DATA_H
+#define __U_BAM_DATA_H
+
+#include <mach/usb_gadget_xport.h>
+
+enum function_type {
+	USB_FUNC_ECM,
+	USB_FUNC_MBIM,
+	USB_FUNC_RNDIS,
+};
+
+struct data_port {
+	struct usb_composite_dev	*cdev;
+	struct usb_ep			*in;
+	struct usb_ep			*out;
+};
+
+void bam_data_disconnect(struct data_port *gr, u8 port_num);
+
+int bam_data_connect(struct data_port *gr, u8 port_num,
+	enum transport_type trans, u8 connection_idx, enum function_type func);
+
+int bam_data_setup(unsigned int no_bam2bam_port);
+
+void bam_data_suspend(u8 port_num);
+
+void bam_data_resume(u8 port_num);
+
+#endif /* __U_BAM_DATA_H */
diff --git a/drivers/usb/gadget/u_qc_ether.c b/drivers/usb/gadget/u_qc_ether.c
index ce0a12e..e10ec25 100644
--- a/drivers/usb/gadget/u_qc_ether.c
+++ b/drivers/usb/gadget/u_qc_ether.c
@@ -4,7 +4,7 @@
  * Copyright (C) 2003-2005,2008 David Brownell
  * Copyright (C) 2003-2004 Robert Schwebel, Benedikt Spranger
  * Copyright (C) 2008 Nokia Corporation
- * Copyright (c) 2012, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2
@@ -62,7 +62,7 @@
 	 * or updating its backlink port_usb->ioport
 	 */
 	spinlock_t		lock;
-	struct qc_gether		*port_usb;
+	struct qc_gether	*port_usb;
 
 	struct net_device	*net;
 	struct usb_gadget	*gadget;
@@ -235,6 +235,14 @@
 	.name	= "gadget",
 };
 
+void gether_qc_get_macs(u8 dev_mac[ETH_ALEN], u8 host_mac[ETH_ALEN])
+{
+	if (get_qc_ether_addr(qc_dev_addr, dev_mac))
+		pr_debug("using random dev_mac ethernet address\n");
+	if (get_qc_ether_addr(qc_host_addr, host_mac))
+		pr_debug("using random host_mac ethernet address\n");
+}
+
 /**
  * gether_qc_setup - initialize one ethernet-over-usb link
  * @g: gadget to associated with these links
@@ -320,6 +328,7 @@
 
 /**
  * gether_qc_cleanup_name - remove Ethernet-over-USB device
+ * @netname: name for network device (for example, "usb")
  * Context: may sleep
  *
  * This is called to free all resources allocated by @gether_qc_setup().
@@ -343,6 +352,7 @@
  * is active
  * @link: the USB link, set up with endpoints, descriptors matching
  *	current device speed, and any framing wrapper(s) set up.
+ * @netname: name for network device (for example, "usb")
  * Context: irqs blocked
  *
  * This is called to let the network layer know the connection
@@ -391,6 +401,7 @@
  * gether_qc_disconnect_name - notify network layer that USB
  * link is inactive
  * @link: the USB link, on which gether_connect() was called
+ * @netname: name for network device (for example, "usb")
  * Context: irqs blocked
  *
  * This is called to let the network layer know the connection
diff --git a/drivers/usb/gadget/u_qc_ether.h b/drivers/usb/gadget/u_qc_ether.h
index 29193e0..25562da 100644
--- a/drivers/usb/gadget/u_qc_ether.h
+++ b/drivers/usb/gadget/u_qc_ether.h
@@ -4,7 +4,7 @@
  * Copyright (C) 2003-2005,2008 David Brownell
  * Copyright (C) 2003-2004 Robert Schwebel, Benedikt Spranger
  * Copyright (C) 2008 Nokia Corporation
- * Copyright (c) 2012, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2
@@ -49,7 +49,7 @@
 	struct usb_function		func;
 
 	/* updated by gether_{connect,disconnect} */
-	struct eth_qc_dev			*ioport;
+	struct eth_qc_dev		*ioport;
 
 	/* endpoints handle full and/or high speeds */
 	struct usb_ep			*in_ep;
@@ -61,10 +61,7 @@
 
 	/* hooks for added framing, as needed for RNDIS and EEM. */
 	u32				header_len;
-	/* NCM requires fixed size bundles */
-	bool				is_fixed;
-	u32				fixed_out_len;
-	u32				fixed_in_len;
+
 	struct sk_buff			*(*wrap)(struct qc_gether *port,
 						struct sk_buff *skb);
 	int				(*unwrap)(struct qc_gether *port,
@@ -89,10 +86,14 @@
 void gether_qc_disconnect_name(struct qc_gether *link, const char *netname);
 
 /* each configuration may bind one instance of an ethernet link */
-int ecm_qc_bind_config(struct usb_configuration *c, u8 ethaddr[ETH_ALEN]);
+int ecm_qc_bind_config(struct usb_configuration *c, u8 ethaddr[ETH_ALEN],
+				char *xport_name);
 
 int
 rndis_qc_bind_config_vendor(struct usb_configuration *c, u8 ethaddr[ETH_ALEN],
 					 u32 vendorID, const char *manufacturer,
 					 u8 maxPktPerXfer);
+
+void gether_qc_get_macs(u8 dev_mac[ETH_ALEN], u8 host_mac[ETH_ALEN]);
+
 #endif /* __U_QC_ETHER_H */
diff --git a/drivers/usb/host/ehci-msm2.c b/drivers/usb/host/ehci-msm2.c
index 40e1eea..faa5625 100644
--- a/drivers/usb/host/ehci-msm2.c
+++ b/drivers/usb/host/ehci-msm2.c
@@ -45,6 +45,7 @@
 	struct ehci_hcd				ehci;
 	spinlock_t				wakeup_lock;
 	struct device				*dev;
+	struct clk				*xo_clk;
 	struct clk				*iface_clk;
 	struct clk				*core_clk;
 	struct clk				*alt_core_clk;
@@ -659,10 +660,14 @@
 	clk_disable_unprepare(mhcd->core_clk);
 
 	/* usb phy does not require TCXO clock, hence vote for TCXO disable */
-	ret = msm_xo_mode_vote(mhcd->xo_handle, MSM_XO_MODE_OFF);
-	if (ret)
-		dev_err(mhcd->dev, "%s failed to devote for "
-			"TCXO D0 buffer%d\n", __func__, ret);
+	if (!IS_ERR(mhcd->xo_clk)) {
+		clk_disable_unprepare(mhcd->xo_clk);
+	} else {
+		ret = msm_xo_mode_vote(mhcd->xo_handle, MSM_XO_MODE_OFF);
+		if (ret)
+			dev_err(mhcd->dev, "%s failed to devote for TCXO %d\n",
+								__func__, ret);
+	}
 
 	msm_ehci_config_vddcx(mhcd, 0);
 
@@ -714,10 +719,14 @@
 	wake_lock(&mhcd->wlock);
 
 	/* Vote for TCXO when waking up the phy */
-	ret = msm_xo_mode_vote(mhcd->xo_handle, MSM_XO_MODE_ON);
-	if (ret)
-		dev_err(mhcd->dev, "%s failed to vote for "
-			"TCXO D0 buffer%d\n", __func__, ret);
+	if (!IS_ERR(mhcd->xo_clk)) {
+		clk_prepare_enable(mhcd->xo_clk);
+	} else {
+		ret = msm_xo_mode_vote(mhcd->xo_handle, MSM_XO_MODE_ON);
+		if (ret)
+			dev_err(mhcd->dev, "%s failed to vote for TCXO D0 %d\n",
+								__func__, ret);
+	}
 
 	clk_prepare_enable(mhcd->core_clk);
 	clk_prepare_enable(mhcd->iface_clk);
@@ -1091,18 +1100,23 @@
 	}
 
 	snprintf(pdev_name, PDEV_NAME_LEN, "%s.%d", pdev->name, pdev->id);
-	mhcd->xo_handle = msm_xo_get(MSM_XO_TCXO_D0, pdev_name);
-	if (IS_ERR(mhcd->xo_handle)) {
-		dev_err(&pdev->dev, "%s not able to get the handle "
-			"to vote for TCXO D0 buffer\n", __func__);
-		ret = PTR_ERR(mhcd->xo_handle);
-		goto free_async_irq;
+	mhcd->xo_clk = clk_get(&pdev->dev, "xo");
+	if (!IS_ERR(mhcd->xo_clk)) {
+		ret = clk_prepare_enable(mhcd->xo_clk);
+	} else {
+		mhcd->xo_handle = msm_xo_get(MSM_XO_TCXO_D0, pdev_name);
+		if (IS_ERR(mhcd->xo_handle)) {
+			dev_err(&pdev->dev, "%s fail to get handle for XO D0\n",
+								__func__);
+			ret = PTR_ERR(mhcd->xo_handle);
+			goto free_async_irq;
+		} else {
+			ret = msm_xo_mode_vote(mhcd->xo_handle, MSM_XO_MODE_ON);
+		}
 	}
-
-	ret = msm_xo_mode_vote(mhcd->xo_handle, MSM_XO_MODE_ON);
 	if (ret) {
-		dev_err(&pdev->dev, "%s failed to vote for TCXO "
-			"D0 buffer%d\n", __func__, ret);
+		dev_err(&pdev->dev, "%s failed to vote for TCXO %d\n",
+								__func__, ret);
 		goto free_xo_handle;
 	}
 
@@ -1202,9 +1216,15 @@
 deinit_clocks:
 	msm_ehci_init_clocks(mhcd, 0);
 devote_xo_handle:
-	msm_xo_mode_vote(mhcd->xo_handle, MSM_XO_MODE_OFF);
+	if (!IS_ERR(mhcd->xo_clk))
+		clk_disable_unprepare(mhcd->xo_clk);
+	else
+		msm_xo_mode_vote(mhcd->xo_handle, MSM_XO_MODE_OFF);
 free_xo_handle:
-	msm_xo_put(mhcd->xo_handle);
+	if (!IS_ERR(mhcd->xo_clk))
+		clk_put(mhcd->xo_clk);
+	else
+		msm_xo_put(mhcd->xo_handle);
 free_async_irq:
 	if (mhcd->async_irq)
 		free_irq(mhcd->async_irq, mhcd);
@@ -1236,7 +1256,12 @@
 
 	usb_remove_hcd(hcd);
 
-	msm_xo_put(mhcd->xo_handle);
+	if (!IS_ERR(mhcd->xo_clk)) {
+		clk_disable_unprepare(mhcd->xo_clk);
+		clk_put(mhcd->xo_clk);
+	} else {
+		msm_xo_put(mhcd->xo_handle);
+	}
 	msm_ehci_vbus_power(mhcd, 0);
 	msm_ehci_init_vbus(mhcd, 0);
 	msm_ehci_ldo_enable(mhcd, 0);
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index 38a3c15..323b481 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -145,29 +145,37 @@
  */
 static void inc_deq(struct xhci_hcd *xhci, struct xhci_ring *ring)
 {
-	union xhci_trb *next;
 	unsigned long long addr;
 
 	ring->deq_updates++;
 
-	/* If this is not event ring, there is one more usable TRB */
+	/*
+	 * If this is not event ring, and the dequeue pointer
+	 * is not on a link TRB, there is one more usable TRB
+	 */
 	if (ring->type != TYPE_EVENT &&
 			!last_trb(xhci, ring, ring->deq_seg, ring->dequeue))
 		ring->num_trbs_free++;
-	next = ++(ring->dequeue);
 
-	/* Update the dequeue pointer further if that was a link TRB or we're at
-	 * the end of an event ring segment (which doesn't have link TRBS)
-	 */
-	while (last_trb(xhci, ring, ring->deq_seg, next)) {
-		if (ring->type == TYPE_EVENT &&	last_trb_on_last_seg(xhci,
-				ring, ring->deq_seg, next)) {
-			ring->cycle_state = (ring->cycle_state ? 0 : 1);
+	do {
+		/*
+		 * Update the dequeue pointer further if that was a link TRB or
+		 * we're at the end of an event ring segment (which doesn't have
+		 * link TRBS)
+		 */
+		if (last_trb(xhci, ring, ring->deq_seg, ring->dequeue)) {
+			if (ring->type == TYPE_EVENT &&
+					last_trb_on_last_seg(xhci, ring,
+						ring->deq_seg, ring->dequeue)) {
+				ring->cycle_state = (ring->cycle_state ? 0 : 1);
+			}
+			ring->deq_seg = ring->deq_seg->next;
+			ring->dequeue = ring->deq_seg->trbs;
+		} else {
+			ring->dequeue++;
 		}
-		ring->deq_seg = ring->deq_seg->next;
-		ring->dequeue = ring->deq_seg->trbs;
-		next = ring->dequeue;
-	}
+	} while (last_trb(xhci, ring, ring->deq_seg, ring->dequeue));
+
 	addr = (unsigned long long) xhci_trb_virt_to_dma(ring->deq_seg, ring->dequeue);
 }
 
@@ -885,6 +893,17 @@
 	num_trbs_free_temp = ep_ring->num_trbs_free;
 	dequeue_temp = ep_ring->dequeue;
 
+	/* If we get two back-to-back stalls, and the first stalled transfer
+	 * ends just before a link TRB, the dequeue pointer will be left on
+	 * the link TRB by the code in the while loop.  So we have to update
+	 * the dequeue pointer one segment further, or we'll jump off
+	 * the segment into la-la-land.
+	 */
+	if (last_trb(xhci, ep_ring, ep_ring->deq_seg, ep_ring->dequeue)) {
+		ep_ring->deq_seg = ep_ring->deq_seg->next;
+		ep_ring->dequeue = ep_ring->deq_seg->trbs;
+	}
+
 	while (ep_ring->dequeue != dev->eps[ep_index].queued_deq_ptr) {
 		/* We have more usable TRBs */
 		ep_ring->num_trbs_free++;
diff --git a/drivers/usb/otg/msm_otg.c b/drivers/usb/otg/msm_otg.c
index c69071d..c03ca69 100644
--- a/drivers/usb/otg/msm_otg.c
+++ b/drivers/usb/otg/msm_otg.c
@@ -95,7 +95,7 @@
 
 static struct regulator *hsusb_3p3;
 static struct regulator *hsusb_1p8;
-static struct regulator *hsusb_vddcx;
+static struct regulator *hsusb_vdd;
 static struct regulator *vbus_otg;
 static struct regulator *mhl_usb_hs_switch;
 static struct power_supply *psy;
@@ -111,7 +111,7 @@
 #endif
 }
 
-static const int vdd_val[VDD_TYPE_MAX][VDD_VAL_MAX] = {
+static int vdd_val[VDD_TYPE_MAX][VDD_VAL_MAX] = {
 		{  /* VDD_CX CORNER Voting */
 			[VDD_NONE]	= RPM_VREG_CORNER_NONE,
 			[VDD_MIN]	= RPM_VREG_CORNER_NOMINAL,
@@ -175,7 +175,7 @@
 	int ret;
 
 	min_vol = vdd_val[vdd_type][!!high];
-	ret = regulator_set_voltage(hsusb_vddcx, min_vol, max_vol);
+	ret = regulator_set_voltage(hsusb_vdd, min_vol, max_vol);
 	if (ret) {
 		pr_err("%s: unable to set the voltage for regulator "
 			"HSUSB_VDDCX\n", __func__);
@@ -984,12 +984,18 @@
 
 	/* usb phy no more require TCXO clock, hence vote for TCXO disable */
 	if (!host_bus_suspend) {
-		ret = msm_xo_mode_vote(motg->xo_handle, MSM_XO_MODE_OFF);
-		if (ret)
-			dev_err(phy->dev, "%s failed to devote for "
-				"TCXO D0 buffer%d\n", __func__, ret);
-		else
+		if (!IS_ERR(motg->xo_clk)) {
+			clk_disable_unprepare(motg->xo_clk);
 			motg->lpm_flags |= XO_SHUTDOWN;
+		} else {
+			ret = msm_xo_mode_vote(motg->xo_handle,
+							MSM_XO_MODE_OFF);
+			if (ret)
+				dev_err(phy->dev, "%s fail to devote XO %d\n",
+								 __func__, ret);
+			else
+				motg->lpm_flags |= XO_SHUTDOWN;
+		}
 	}
 
 	if (motg->caps & ALLOW_PHY_POWER_COLLAPSE &&
@@ -1052,10 +1058,14 @@
 
 	/* Vote for TCXO when waking up the phy */
 	if (motg->lpm_flags & XO_SHUTDOWN) {
-		ret = msm_xo_mode_vote(motg->xo_handle, MSM_XO_MODE_ON);
-		if (ret)
-			dev_err(phy->dev, "%s failed to vote for "
-				"TCXO D0 buffer%d\n", __func__, ret);
+		if (!IS_ERR(motg->xo_clk)) {
+			clk_prepare_enable(motg->xo_clk);
+		} else {
+			ret = msm_xo_mode_vote(motg->xo_handle, MSM_XO_MODE_ON);
+			if (ret)
+				dev_err(phy->dev, "%s fail to vote for XO %d\n",
+								__func__, ret);
+		}
 		motg->lpm_flags &= ~XO_SHUTDOWN;
 	}
 
@@ -3792,6 +3802,8 @@
 static int __init msm_otg_probe(struct platform_device *pdev)
 {
 	int ret = 0;
+	int len = 0;
+	u32 tmp[3];
 	struct resource *res;
 	struct msm_otg *motg;
 	struct usb_phy *phy;
@@ -3919,42 +3931,69 @@
 		motg->async_irq = 0;
 	}
 
-	motg->xo_handle = msm_xo_get(MSM_XO_TCXO_D0, "usb");
-	if (IS_ERR(motg->xo_handle)) {
-		dev_err(&pdev->dev, "%s not able to get the handle "
-			"to vote for TCXO D0 buffer\n", __func__);
-		ret = PTR_ERR(motg->xo_handle);
-		goto free_regs;
+	motg->xo_clk = clk_get(&pdev->dev, "xo");
+	if (IS_ERR(motg->xo_clk)) {
+		motg->xo_handle = msm_xo_get(MSM_XO_TCXO_D0, "usb");
+		if (IS_ERR(motg->xo_handle)) {
+			dev_err(&pdev->dev, "%s fail to get handle for TCXO\n",
+								__func__);
+			ret = PTR_ERR(motg->xo_handle);
+			goto free_regs;
+		} else {
+			ret = msm_xo_mode_vote(motg->xo_handle, MSM_XO_MODE_ON);
+			if (ret) {
+				dev_err(&pdev->dev, "%s XO voting failed %d\n",
+								__func__, ret);
+				goto free_xo_handle;
+			}
+		}
+	} else {
+		ret = clk_prepare_enable(motg->xo_clk);
+		if (ret) {
+			dev_err(&pdev->dev, "%s failed to vote for TCXO %d\n",
+							__func__, ret);
+			goto free_xo_handle;
+		}
 	}
 
-	ret = msm_xo_mode_vote(motg->xo_handle, MSM_XO_MODE_ON);
-	if (ret) {
-		dev_err(&pdev->dev, "%s failed to vote for TCXO "
-			"D0 buffer%d\n", __func__, ret);
-		goto free_xo_handle;
-	}
 
 	clk_prepare_enable(motg->pclk);
 
 	motg->vdd_type = VDDCX_CORNER;
-	hsusb_vddcx = devm_regulator_get(motg->phy.dev, "hsusb_vdd_dig");
-	if (IS_ERR(hsusb_vddcx)) {
-		hsusb_vddcx = devm_regulator_get(motg->phy.dev, "HSUSB_VDDCX");
-		if (IS_ERR(hsusb_vddcx)) {
+	hsusb_vdd = devm_regulator_get(motg->phy.dev, "hsusb_vdd_dig");
+	if (IS_ERR(hsusb_vdd)) {
+		hsusb_vdd = devm_regulator_get(motg->phy.dev, "HSUSB_VDDCX");
+		if (IS_ERR(hsusb_vdd)) {
 			dev_err(motg->phy.dev, "unable to get hsusb vddcx\n");
-			ret = PTR_ERR(hsusb_vddcx);
+			ret = PTR_ERR(hsusb_vdd);
 			goto devote_xo_handle;
 		}
 		motg->vdd_type = VDDCX;
 	}
 
+	if (pdev->dev.of_node) {
+		of_get_property(pdev->dev.of_node,
+				"qcom,vdd-voltage-level",
+				&len);
+		if (len == sizeof(tmp)) {
+			of_property_read_u32_array(pdev->dev.of_node,
+					"qcom,vdd-voltage-level",
+					tmp, len/sizeof(*tmp));
+			vdd_val[motg->vdd_type][0] = tmp[0];
+			vdd_val[motg->vdd_type][1] = tmp[1];
+			vdd_val[motg->vdd_type][2] = tmp[2];
+		} else {
+			dev_dbg(&pdev->dev, "Using default hsusb vdd config.\n");
+		}
+	}
+
 	ret = msm_hsusb_config_vddcx(1);
 	if (ret) {
 		dev_err(&pdev->dev, "hsusb vddcx configuration failed\n");
 		goto devote_xo_handle;
 	}
 
-	ret = regulator_enable(hsusb_vddcx);
+	ret = regulator_enable(hsusb_vdd);
 	if (ret) {
 		dev_err(&pdev->dev, "unable to enable the hsusb vddcx\n");
 		goto free_config_vddcx;
@@ -3963,7 +4002,7 @@
 	ret = msm_hsusb_ldo_init(motg, 1);
 	if (ret) {
 		dev_err(&pdev->dev, "hsusb vreg configuration failed\n");
-		goto free_hsusb_vddcx;
+		goto free_hsusb_vdd;
 	}
 
 	if (pdata->mhl_enable) {
@@ -4146,17 +4185,23 @@
 	msm_hsusb_ldo_enable(motg, USB_PHY_REG_OFF);
 free_ldo_init:
 	msm_hsusb_ldo_init(motg, 0);
-free_hsusb_vddcx:
-	regulator_disable(hsusb_vddcx);
+free_hsusb_vdd:
+	regulator_disable(hsusb_vdd);
 free_config_vddcx:
-	regulator_set_voltage(hsusb_vddcx,
+	regulator_set_voltage(hsusb_vdd,
 		vdd_val[motg->vdd_type][VDD_NONE],
 		vdd_val[motg->vdd_type][VDD_MAX]);
 devote_xo_handle:
 	clk_disable_unprepare(motg->pclk);
-	msm_xo_mode_vote(motg->xo_handle, MSM_XO_MODE_OFF);
+	if (!IS_ERR(motg->xo_clk))
+		clk_disable_unprepare(motg->xo_clk);
+	else
+		msm_xo_mode_vote(motg->xo_handle, MSM_XO_MODE_OFF);
 free_xo_handle:
-	msm_xo_put(motg->xo_handle);
+	if (!IS_ERR(motg->xo_clk))
+		clk_put(motg->xo_clk);
+	else
+		msm_xo_put(motg->xo_handle);
 free_regs:
 	iounmap(motg->regs);
 put_pclk:
@@ -4229,11 +4274,16 @@
 
 	clk_disable_unprepare(motg->pclk);
 	clk_disable_unprepare(motg->core_clk);
-	msm_xo_put(motg->xo_handle);
+	if (!IS_ERR(motg->xo_clk)) {
+		clk_disable_unprepare(motg->xo_clk);
+		clk_put(motg->xo_clk);
+	} else {
+		msm_xo_put(motg->xo_handle);
+	}
 	msm_hsusb_ldo_enable(motg, USB_PHY_REG_OFF);
 	msm_hsusb_ldo_init(motg, 0);
-	regulator_disable(hsusb_vddcx);
-	regulator_set_voltage(hsusb_vddcx,
+	regulator_disable(hsusb_vdd);
+	regulator_set_voltage(hsusb_vdd,
 		vdd_val[motg->vdd_type][VDD_NONE],
 		vdd_val[motg->vdd_type][VDD_MAX]);
 
diff --git a/drivers/video/msm/mdp4.h b/drivers/video/msm/mdp4.h
index ed0a385..a3d8d7e 100644
--- a/drivers/video/msm/mdp4.h
+++ b/drivers/video/msm/mdp4.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2009-2012, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2009-2013, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -276,6 +276,7 @@
 	struct mdp4_overlay_pipe *solidfill_pipe;
 };
 
+
 struct mdp4_overlay_pipe {
 	uint32 pipe_used;
 	uint32 pipe_type;		/* rgb, video/graphic */
@@ -983,6 +984,8 @@
 void mdp4_overlay_mdp_perf_upd(struct msm_fb_data_type *mfd, int flag);
 int mdp4_update_base_blend(struct msm_fb_data_type *mfd,
 				struct mdp_blend_cfg *mdp_blend_cfg);
+int mdp4_update_writeback_format(struct msm_fb_data_type *mfd,
+			struct mdp_mixer_cfg *mdp_mixer_cfg);
 u32 mdp4_get_mixer_num(u32 panel_type);
 
 #ifndef CONFIG_FB_MSM_WRITEBACK_MSM_PANEL
diff --git a/drivers/video/msm/mdp4_overlay.c b/drivers/video/msm/mdp4_overlay.c
index fbae011..bfd8238 100644
--- a/drivers/video/msm/mdp4_overlay.c
+++ b/drivers/video/msm/mdp4_overlay.c
@@ -49,6 +49,7 @@
 	struct mdp4_overlay_pipe *baselayer[MDP4_MIXER_MAX];
 	struct blend_cfg blend[MDP4_MIXER_MAX][MDP4_MIXER_STAGE_MAX];
 	struct mdp4_overlay_pipe sf_plist[MDP4_MIXER_MAX][OVERLAY_PIPE_MAX];
+	struct mdp_mixer_cfg mdp_mixer_cfg[MDP4_MIXER_MAX];
 	uint32 mixer_cfg[MDP4_MIXER_MAX];
 	uint32 flush[MDP4_MIXER_MAX];
 	struct iommu_free_list iommu_free[MDP4_MIXER_MAX];
@@ -1456,6 +1457,87 @@
 			(pipe->element1 << 8) | pipe->element0;
 }
 
+static uint32 mdp4_overlayproc_cfg_wb_panel(struct mdp4_overlay_pipe *pipe,
+					char *overlay_base, uint32 curr)
+{
+	int off, bpp;
+	uint32 flag;
+	bool is_rgb = false;
+	struct mdp_mixer_cfg *mixer_cfg;
+
+	off = 0;
+	mixer_cfg = &ctrl->mdp_mixer_cfg[MDP4_MIXER2];
+
+	switch (mixer_cfg->writeback_format) {
+	case WB_FORMAT_RGB_888:
+		bpp = 3; /* RGB888 */
+		flag = 0x0;
+		is_rgb = true;
+		break;
+	case WB_FORMAT_RGB_565:
+		bpp = 2; /* RGB565 */
+		flag = 0x1;
+		is_rgb = true;
+		break;
+	case WB_FORMAT_xRGB_8888:
+		bpp = 4; /* xRGB8888 */
+		flag = 0x3;
+		is_rgb = true;
+		break;
+	case WB_FORMAT_ARGB_8888:
+		bpp = 4; /* ARGB8888 */
+		flag = 0x80000003;
+		is_rgb = true;
+		break;
+	case WB_FORMAT_ARGB_8888_INPUT_ALPHA:
+		pr_warn("currently not supported ARGB_8888_INPUT_ALPHA\n");
+	default:
+		bpp = 1; /* NV12 */
+		is_rgb = false;
+		break;
+	}
+
+	if (is_rgb == true) {
+		if (pipe->ov_cnt & 0x01)
+			off = pipe->src_height * pipe->src_width * bpp;
+
+		outpdw(overlay_base + 0x000c, pipe->ov_blt_addr + off);
+		/* overlay output is RGB888 */
+		outpdw(overlay_base + 0x0010, pipe->src_width * bpp);
+		outpdw(overlay_base + 0x001c, pipe->ov_blt_addr + off);
+		/* MDDI - BLT + on demand */
+		outpdw(overlay_base + 0x0004, 0x08);
+
+		curr = inpdw(overlay_base + 0x0014);
+		curr &= 0x4;
+
+		outpdw(overlay_base + 0x0014, curr | flag);
+	} else {
+		if (pipe->ov_cnt & 0x01)
+			off = pipe->src_height * pipe->src_width * bpp;
+
+		outpdw(overlay_base + 0x000c, pipe->ov_blt_addr + off);
+		/* overlay output is RGB888 */
+		outpdw(overlay_base + 0x0010, ((pipe->src_width << 16) |
+				pipe->src_width));
+		outpdw(overlay_base + 0x001c, pipe->ov_blt_addr + off);
+		off = pipe->src_height * pipe->src_width;
+		/* align chroma to 2k address */
+		off = (off + 2047) & ~2047;
+		/* UV plane address */
+		outpdw(overlay_base + 0x0020, pipe->ov_blt_addr + off);
+		/* MDDI - BLT + on demand */
+		outpdw(overlay_base + 0x0004, 0x08);
+		/* pseudo planar + writeback */
+		curr = inpdw(overlay_base + 0x0014);
+		curr &= 0x4;
+		outpdw(overlay_base + 0x0014, curr | 0x012);
+		/* rgb->yuv */
+		outpdw(overlay_base + 0x0200, 0x05);
+	}
+	return curr;
+}
+
 /*
  * mdp4_overlayproc_cfg: only be called from base layer
  */
@@ -1515,34 +1597,8 @@
 #endif
 		} else if (pipe->mixer_num == MDP4_MIXER2) {
 			if (ctrl->panel_mode & MDP4_PANEL_WRITEBACK) {
-				off = 0;
-				bpp = 1;
-				if (pipe->ov_cnt & 0x01)
-					off = pipe->src_height *
-							pipe->src_width * bpp;
-
-				outpdw(overlay_base + 0x000c,
-						pipe->ov_blt_addr + off);
-				/* overlay ouput is RGB888 */
-				outpdw(overlay_base + 0x0010,
-					((pipe->src_width << 16) |
-					 pipe->src_width));
-				outpdw(overlay_base + 0x001c,
-						pipe->ov_blt_addr + off);
-				off = pipe->src_height * pipe->src_width;
-				/* align chroma to 2k address */
-				off = (off + 2047) & ~2047;
-				/* UV plane adress */
-				outpdw(overlay_base + 0x0020,
-						pipe->ov_blt_addr + off);
-				/* MDDI - BLT + on demand */
-				outpdw(overlay_base + 0x0004, 0x08);
-				/* pseudo planar + writeback */
-				curr = inpdw(overlay_base + 0x0014);
-				curr &= 0x4;
-				outpdw(overlay_base + 0x0014, curr | 0x012);
-				/* rgb->yuv */
-				outpdw(overlay_base + 0x0200, 0x05);
+				curr = mdp4_overlayproc_cfg_wb_panel(pipe,
+							overlay_base, curr);
 			}
 		}
 	} else {
@@ -3865,6 +3921,42 @@
 	mutex_unlock(&mfd->dma->ov_mutex);
 	return err;
 }
+
+int mdp4_update_writeback_format(struct msm_fb_data_type *mfd,
+				struct mdp_mixer_cfg *mdp_mixer_cfg)
+{
+	int ret = 0;
+	u32 mixer_num;
+	struct mdp_mixer_cfg *mixer;
+
+	mixer_num = mdp4_get_mixer_num(mfd->panel_info.type);
+	if (!ctrl) {
+		pr_warn("mdp4_overlay_ctrl is NULL\n");
+		return -EPERM;
+	}
+	mixer = &ctrl->mdp_mixer_cfg[mixer_num];
+
+	switch (mdp_mixer_cfg->writeback_format) {
+	case WB_FORMAT_RGB_888:
+	case WB_FORMAT_RGB_565:
+	case WB_FORMAT_NV12:
+	case WB_FORMAT_xRGB_8888:
+	case WB_FORMAT_ARGB_8888:
+		mixer->writeback_format = mdp_mixer_cfg->writeback_format;
+		break;
+	case WB_FORMAT_ARGB_8888_INPUT_ALPHA:
+		mixer->writeback_format = mdp_mixer_cfg->writeback_format;
+		mixer->alpha = mdp_mixer_cfg->alpha;
+		break;
+	default:
+		mixer->writeback_format = WB_FORMAT_NV12;
+		pr_warn("Unsupported format request, setting to NV12\n");
+		ret = -EINVAL;
+		break;
+	}
+	return ret;
+}
+
 int mdp4_update_base_blend(struct msm_fb_data_type *mfd,
 			struct mdp_blend_cfg *mdp_blend_cfg)
 {
diff --git a/drivers/video/msm/mdss/mdss.h b/drivers/video/msm/mdss/mdss.h
index b6882b8..8ceb62e 100644
--- a/drivers/video/msm/mdss/mdss.h
+++ b/drivers/video/msm/mdss/mdss.h
@@ -76,9 +76,7 @@
 	u32 mdp_irq_mask;
 	u32 mdp_hist_irq_mask;
 
-	u32 suspend;
-	u32 timeout;
-
+	int suspend_fs_ena;
 	atomic_t clk_ref;
 	u8 clk_ena;
 	u8 fs_ena;
diff --git a/drivers/video/msm/mdss/mdss_dsi.c b/drivers/video/msm/mdss/mdss_dsi.c
index 54eaabb..99eea82 100644
--- a/drivers/video/msm/mdss/mdss_dsi.c
+++ b/drivers/video/msm/mdss/mdss_dsi.c
@@ -135,8 +135,8 @@
 			pr_err("%s: Failed to enable regulator.\n", __func__);
 			return ret;
 		}
-
-		mdss_dsi_panel_reset(pdata, 1);
+		if (pdata->panel_info.panel_power_on == 0)
+			mdss_dsi_panel_reset(pdata, 1);
 
 	} else {
 
@@ -222,6 +222,13 @@
 		return -EINVAL;
 	}
 
+	if (!pdata->panel_info.panel_power_on) {
+		pr_warn("%s:%d Panel already off.\n", __func__, __LINE__);
+		return -EPERM;
+	}
+
+	pdata->panel_info.panel_power_on = 0;
+
 	ctrl_pdata = container_of(pdata, struct mdss_dsi_ctrl_pdata,
 				panel_data);
 	mdss_dsi_clk_disable(pdata);
@@ -241,7 +248,38 @@
 	return ret;
 }
 
-static int mdss_dsi_on(struct mdss_panel_data *pdata)
+int mdss_dsi_cont_splash_on(struct mdss_panel_data *pdata)
+{
+	int ret = 0;
+	struct mipi_panel_info *mipi;
+
+	pr_info("%s:%d DSI on for continuous splash.\n", __func__, __LINE__);
+
+	if (pdata == NULL) {
+		pr_err("%s: Invalid input data\n", __func__);
+		return -EINVAL;
+	}
+
+	mipi  = &pdata->panel_info.mipi;
+
+	ret = mdss_dsi_panel_power_on(pdata, 1);
+	if (ret) {
+		pr_err("%s: Panel power on failed\n", __func__);
+		return ret;
+	}
+	mdss_dsi_sw_reset(pdata);
+	mdss_dsi_host_init(mipi, pdata);
+
+	pdata->panel_info.panel_power_on = 1;
+
+	mdss_dsi_op_mode_config(mipi->mode, pdata);
+
+	pr_debug("%s-:End\n", __func__);
+	return ret;
+}
+
+
+int mdss_dsi_on(struct mdss_panel_data *pdata)
 {
 	int ret = 0;
 	u32 clk_rate;
@@ -257,6 +295,11 @@
 		return -EINVAL;
 	}
 
+	if (pdata->panel_info.panel_power_on) {
+		pr_warn("%s:%d Panel already on.\n", __func__, __LINE__);
+		return 0;
+	}
+
 	ctrl_pdata = container_of(pdata, struct mdss_dsi_ctrl_pdata,
 				panel_data);
 	pinfo = &pdata->panel_info;
@@ -267,6 +310,8 @@
 		return ret;
 	}
 
+	pdata->panel_info.panel_power_on = 1;
+
 	mdss_dsi_phy_sw_reset((ctrl_pdata->ctrl_base));
 	mdss_dsi_phy_init(pdata);
 
@@ -394,6 +439,16 @@
 		}
 		rc = mdss_dsi_off(pdata);
 		break;
+	case MDSS_EVENT_CONT_SPLASH_FINISH:
+		if (ctrl_pdata->on_cmds->ctrl_state == DSI_LP_MODE) {
+			rc = mdss_dsi_cont_splash_on(pdata);
+		} else {
+			pr_debug("%s:event=%d, Dsi On not called: ctrl_state: %d\n",
+				 __func__, event,
+				 ctrl_pdata->on_cmds->ctrl_state);
+			rc = -EINVAL;
+		}
+		break;
 	default:
 		pr_debug("%s: unhandled event=%d\n", __func__, event);
 		break;
@@ -542,6 +597,7 @@
 	struct platform_device *ctrl_pdev = NULL;
 	unsigned char *ctrl_addr;
 	bool broadcast;
+	bool cont_splash_enabled = false;
 
 	h_period = ((panel_data->panel_info.lcdc.h_pulse_width)
 			+ (panel_data->panel_info.lcdc.h_back_porch)
@@ -649,13 +705,6 @@
 			gpio_free(ctrl_pdata->disp_en_gpio);
 			return -ENODEV;
 		}
-		rc = gpio_direction_output(ctrl_pdata->disp_en_gpio, 1);
-		if (rc) {
-			pr_err("set_direction for disp_en gpio failed, rc=%d\n",
-			       rc);
-			gpio_free(ctrl_pdata->disp_en_gpio);
-			return -ENODEV;
-		}
 	}
 
 	ctrl_pdata->rst_gpio = of_get_named_gpio(pdev->dev.of_node,
@@ -705,6 +754,28 @@
 	/*
 	 * register in mdp driver
 	 */
+
+	cont_splash_enabled = of_property_read_bool(pdev->dev.of_node,
+			"qcom,cont-splash-enabled");
+	if (!cont_splash_enabled) {
+		pr_info("%s:%d Continuous splash flag not found.\n",
+				__func__, __LINE__);
+		ctrl_pdata->panel_data.panel_info.cont_splash_enabled = 0;
+		ctrl_pdata->panel_data.panel_info.panel_power_on = 0;
+	} else {
+		pr_info("%s:%d Continuous splash flag enabled.\n",
+				__func__, __LINE__);
+
+		ctrl_pdata->panel_data.panel_info.cont_splash_enabled = 1;
+		ctrl_pdata->panel_data.panel_info.panel_power_on = 1;
+	}
+
+
+	if (ctrl_pdata->panel_data.panel_info.cont_splash_enabled) {
+		mdss_dsi_prepare_clocks(ctrl_pdata);
+		mdss_dsi_clk_enable(&(ctrl_pdata->panel_data));
+	}
+
 	rc = mdss_register_panel(ctrl_pdev, &(ctrl_pdata->panel_data));
 	if (rc) {
 		dev_err(&pdev->dev, "unable to register MIPI DSI panel\n");
diff --git a/drivers/video/msm/mdss/mdss_fb.c b/drivers/video/msm/mdss/mdss_fb.c
index 74e6a95..1e0de89 100644
--- a/drivers/video/msm/mdss/mdss_fb.c
+++ b/drivers/video/msm/mdss/mdss_fb.c
@@ -386,6 +386,7 @@
 			return ret;
 		}
 		mfd->op_enable = false;
+		fb_set_suspend(mfd->fbi, FBINFO_STATE_SUSPENDED);
 	}
 
 	return 0;
@@ -417,6 +418,8 @@
 					mfd->op_enable);
 		if (ret)
 			pr_warn("can't turn on display!\n");
+		else
+			fb_set_suspend(mfd->fbi, FBINFO_STATE_RUNNING);
 	}
 	mfd->is_power_setting = false;
 	complete_all(&mfd->power_set_comp);
@@ -424,39 +427,61 @@
 	return ret;
 }
 
-int mdss_fb_suspend_all(void)
+#if defined(CONFIG_PM) && !defined(CONFIG_PM_SLEEP)
+static int mdss_fb_suspend(struct platform_device *pdev, pm_message_t state)
 {
-	struct fb_info *fbi;
-	int ret, i;
-	int result = 0;
-	for (i = 0; i < fbi_list_index; i++) {
-		fbi = fbi_list[i];
-		fb_set_suspend(fbi, FBINFO_STATE_SUSPENDED);
+	struct msm_fb_data_type *mfd = platform_get_drvdata(pdev);
+	if (!mfd)
+		return -ENODEV;
 
-		ret = mdss_fb_suspend_sub(fbi->par);
-		if (ret != 0) {
-			fb_set_suspend(fbi, FBINFO_STATE_RUNNING);
-			result = ret;
-		}
-	}
-	return result;
+	dev_dbg(&pdev->dev, "display suspend\n");
+
+	return mdss_fb_suspend_sub(mfd);
 }
 
-int mdss_fb_resume_all(void)
+static int mdss_fb_resume(struct platform_device *pdev)
 {
-	struct fb_info *fbi;
-	int ret, i;
-	int result = 0;
+	struct msm_fb_data_type *mfd = platform_get_drvdata(pdev);
+	if (!mfd)
+		return -ENODEV;
 
-	for (i = 0; i < fbi_list_index; i++) {
-		fbi = fbi_list[i];
+	dev_dbg(&pdev->dev, "display resume\n");
 
-		ret = mdss_fb_resume_sub(fbi->par);
-		if (ret == 0)
-			fb_set_suspend(fbi, FBINFO_STATE_RUNNING);
-	}
-	return result;
+	return mdss_fb_resume_sub(mfd);
 }
+#else
+#define mdss_fb_suspend NULL
+#define mdss_fb_resume NULL
+#endif
+
+#ifdef CONFIG_PM_SLEEP
+static int mdss_fb_pm_suspend(struct device *dev)
+{
+	struct msm_fb_data_type *mfd = dev_get_drvdata(dev);
+
+	if (!mfd)
+		return -ENODEV;
+
+	dev_dbg(dev, "display pm suspend\n");
+
+	return mdss_fb_suspend_sub(mfd);
+}
+
+static int mdss_fb_pm_resume(struct device *dev)
+{
+	struct msm_fb_data_type *mfd = dev_get_drvdata(dev);
+	if (!mfd)
+		return -ENODEV;
+
+	dev_dbg(dev, "display pm resume\n");
+
+	return mdss_fb_resume_sub(mfd);
+}
+#endif
+
+static const struct dev_pm_ops mdss_fb_pm_ops = {
+	SET_SYSTEM_SLEEP_PM_OPS(mdss_fb_pm_suspend, mdss_fb_pm_resume)
+};
 
 static const struct of_device_id mdss_fb_dt_match[] = {
 	{ .compatible = "qcom,mdss-fb",},
@@ -467,9 +492,12 @@
 static struct platform_driver mdss_fb_driver = {
 	.probe = mdss_fb_probe,
 	.remove = mdss_fb_remove,
+	.suspend = mdss_fb_suspend,
+	.resume = mdss_fb_resume,
 	.driver = {
 		.name = "mdss_fb",
 		.of_match_table = mdss_fb_dt_match,
+		.pm = &mdss_fb_pm_ops,
 	},
 };
 
@@ -732,11 +760,9 @@
 			return -ENOMEM;
 		}
 		phys = memory_pool_node_paddr(virt);
-		if (is_mdss_iommu_attached()) {
-			dom = mdss_get_iommu_domain(MDSS_IOMMU_DOMAIN_UNSECURE);
-			msm_iommu_map_contig_buffer(phys, dom, 0, size, SZ_4K,
-						    0, &(mfd->iova));
-		}
+		dom = mdss_get_iommu_domain(MDSS_IOMMU_DOMAIN_UNSECURE);
+		msm_iommu_map_contig_buffer(phys, dom, 0, size, SZ_4K,
+					    0, &(mfd->iova));
 		pr_info("allocating %u bytes at %p (%lx phys) for fb %d\n",
 			size, virt, phys, mfd->index);
 	} else {
@@ -1043,7 +1069,15 @@
 	int i, ret = 0;
 	/* buf sync */
 	for (i = 0; i < mfd->acq_fen_cnt; i++) {
-		ret = sync_fence_wait(mfd->acq_fen[i], WAIT_FENCE_TIMEOUT);
+		ret = sync_fence_wait(mfd->acq_fen[i],
+				WAIT_FENCE_FIRST_TIMEOUT);
+		if (ret == -ETIME) {
+			pr_warn("sync_fence_wait timed out! ");
+			pr_cont("Waiting %ld more seconds\n",
+					WAIT_FENCE_FINAL_TIMEOUT/MSEC_PER_SEC);
+			ret = sync_fence_wait(mfd->acq_fen[i],
+					WAIT_FENCE_FINAL_TIMEOUT);
+		}
 		if (ret < 0) {
 			pr_err("%s: sync_fence_wait failed! ret = %x\n",
 				__func__, ret);
@@ -1063,7 +1097,8 @@
 
 static void mdss_fb_signal_timeline_locked(struct msm_fb_data_type *mfd)
 {
-	if (mfd->timeline) {
+	if (mfd->timeline && !list_empty((const struct list_head *)
+				(&(mfd->timeline->obj.active_list_head)))) {
 		sw_sync_timeline_inc(mfd->timeline, 1);
 		mfd->timeline_value++;
 	}
@@ -1844,6 +1879,16 @@
 		fb_pdev->dev.platform_data = pdata;
 	}
 
+	/*
+	 * Clocks are already on if continuous splash is enabled,
+	 * increasing ref_cnt to help balance clocks once done.
+	 */
+	if (pdata->panel_info.cont_splash_enabled) {
+		mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_ON, false);
+		mdss_mdp_footswitch_ctrl_splash(1);
+		mdss_mdp_copy_splash_screen(pdata);
+	}
+
 mdss_notfound:
 	of_node_put(node);
 
diff --git a/drivers/video/msm/mdss/mdss_fb.h b/drivers/video/msm/mdss/mdss_fb.h
index 193b6b7..db2e305 100644
--- a/drivers/video/msm/mdss/mdss_fb.h
+++ b/drivers/video/msm/mdss/mdss_fb.h
@@ -27,10 +27,11 @@
 #define MSM_FB_MAX_DEV_LIST 32
 
 #define MSM_FB_ENABLE_DBGFS
-/* 900 ms for fence time out */
-#define WAIT_FENCE_TIMEOUT 900
-/* 950 ms for display operation time out */
-#define WAIT_DISP_OP_TIMEOUT 950
+#define WAIT_FENCE_FIRST_TIMEOUT MSEC_PER_SEC
+#define WAIT_FENCE_FINAL_TIMEOUT (10 * MSEC_PER_SEC)
+/* Display op timeout should be greater than total timeout */
+#define WAIT_DISP_OP_TIMEOUT ((WAIT_FENCE_FIRST_TIMEOUT + \
+		WAIT_FENCE_FINAL_TIMEOUT) * MDP_MAX_FENCE_FD)
 
 #ifndef MAX
 #define  MAX(x, y) (((x) > (y)) ? (x) : (y))
@@ -147,8 +148,6 @@
 int mdss_fb_get_phys_info(unsigned long *start, unsigned long *len, int fb_num);
 void mdss_fb_set_backlight(struct msm_fb_data_type *mfd, u32 bkl_lvl);
 void mdss_fb_update_backlight(struct msm_fb_data_type *mfd);
-int mdss_fb_suspend_all(void);
-int mdss_fb_resume_all(void);
 void mdss_fb_wait_for_fence(struct msm_fb_data_type *mfd);
 void mdss_fb_signal_timeline(struct msm_fb_data_type *mfd);
 
diff --git a/drivers/video/msm/mdss/mdss_mdp.c b/drivers/video/msm/mdss/mdss_mdp.c
index 308ae87..e4099ad 100644
--- a/drivers/video/msm/mdss/mdss_mdp.c
+++ b/drivers/video/msm/mdss/mdss_mdp.c
@@ -54,6 +54,9 @@
 
 struct mdss_data_type *mdss_res;
 
+#define IB_QUOTA 800000000
+#define AB_QUOTA 800000000
+
 static DEFINE_SPINLOCK(mdp_lock);
 static DEFINE_MUTEX(mdp_clk_lock);
 
@@ -644,7 +647,7 @@
 	int i;
 
 	if (mdata->iommu_attached) {
-		pr_warn("mdp iommu already attached\n");
+		pr_debug("mdp iommu already attached\n");
 		return 0;
 	}
 
@@ -751,7 +754,7 @@
 	return 0;
 }
 
-static int mdss_hw_init(struct mdss_data_type *mdata)
+int mdss_hw_init(struct mdss_data_type *mdata)
 {
 	int i, j;
 	char *offset;
@@ -794,11 +797,9 @@
 	}
 
 	mdata->res_init = true;
-	mdata->timeout = HZ/20;
 	mdata->clk_ena = false;
 	mdata->irq_mask = MDSS_MDP_DEFAULT_INTR_MASK;
 	mdata->irq_ena = false;
-	mdata->suspend = false;
 
 	rc = mdss_mdp_irq_clk_setup(mdata);
 	if (rc)
@@ -823,6 +824,21 @@
 	return rc;
 }
 
+void mdss_mdp_footswitch_ctrl_splash(int on)
+{
+	if (mdss_res != NULL) {
+		if (on) {
+			pr_debug("Enable MDP FS for splash.\n");
+			regulator_enable(mdss_res->fs);
+		} else {
+			pr_debug("Disable MDP FS for splash.\n");
+			regulator_disable(mdss_res->fs);
+		}
+	} else {
+		pr_warn("mdss mdata not initialized\n");
+	}
+}
+
 static int mdss_mdp_probe(struct platform_device *pdev)
 {
 	struct resource *res;
@@ -915,6 +931,7 @@
 		pr_err("unable to register bus scaling\n");
 		goto probe_done;
 	}
+	mdss_mdp_bus_scale_set_quota(AB_QUOTA, IB_QUOTA);
 
 	rc = mdss_mdp_debug_init(mdata);
 	if (rc) {
@@ -1330,49 +1347,42 @@
 	if (!mdata->fs)
 		return;
 
-	if (on && !mdata->fs_ena) {
+	if (on) {
 		pr_debug("Enable MDP FS\n");
-		regulator_enable(mdata->fs);
-		mdss_iommu_attach(mdata);
-		mdss_hw_init(mdata);
+		if (!mdata->fs_ena)
+			regulator_enable(mdata->fs);
 		mdata->fs_ena = true;
-	} else if (!on && mdata->fs_ena) {
+	} else {
 		pr_debug("Disable MDP FS\n");
 		mdss_iommu_dettach(mdata);
-		regulator_disable(mdata->fs);
+		if (mdata->fs_ena)
+			regulator_disable(mdata->fs);
 		mdata->fs_ena = false;
 	}
 }
 
 static inline int mdss_mdp_suspend_sub(struct mdss_data_type *mdata)
 {
-	int ret;
+	flush_workqueue(mdata->clk_ctrl_wq);
 
-	ret = mdss_fb_suspend_all();
-	if (IS_ERR_VALUE(ret)) {
-		pr_err("Unable to suspend all fb panels (%d)\n", ret);
-		return ret;
-	}
+	mdata->suspend_fs_ena = mdata->fs_ena;
+	mdss_mdp_footswitch_ctrl(mdata, false);
 
-	pr_debug("suspend done\n");
+	pr_debug("suspend done fs=%d\n", mdata->suspend_fs_ena);
 
 	return 0;
 }
 
 static inline int mdss_mdp_resume_sub(struct mdss_data_type *mdata)
 {
-	int ret = 0;
+	if (mdata->suspend_fs_ena)
+		mdss_mdp_footswitch_ctrl(mdata, true);
 
-	ret = mdss_fb_resume_all();
-	if (IS_ERR_VALUE(ret))
-		pr_err("Unable to resume all fb panels (%d)\n", ret);
+	pr_debug("resume done fs=%d\n", mdata->suspend_fs_ena);
 
-	pr_debug("resume done\n");
-
-	return ret;
+	return 0;
 }
 
-#ifdef CONFIG_PM
 #ifdef CONFIG_PM_SLEEP
 static int mdss_mdp_pm_suspend(struct device *dev)
 {
@@ -1399,10 +1409,9 @@
 
 	return mdss_mdp_resume_sub(mdata);
 }
+#endif
 
-#define mdss_mdp_suspend NULL
-#define mdss_mdp_resume NULL
-#else
+#if defined(CONFIG_PM) && !defined(CONFIG_PM_SLEEP)
 static int mdss_mdp_suspend(struct platform_device *pdev, pm_message_t state)
 {
 	struct mdss_data_type *mdata = platform_get_drvdata(pdev);
@@ -1426,6 +1435,9 @@
 
 	return mdss_mdp_resume_sub(mdata);
 }
+#else
+#define mdss_mdp_suspend NULL
+#define mdss_mdp_resume NULL
 #endif
 
 #ifdef CONFIG_PM_RUNTIME
@@ -1471,7 +1483,6 @@
 	return 0;
 }
 #endif
-#endif
 
 static const struct dev_pm_ops mdss_mdp_pm_ops = {
 	SET_SYSTEM_SLEEP_PM_OPS(mdss_mdp_pm_suspend, mdss_mdp_pm_resume)
diff --git a/drivers/video/msm/mdss/mdss_mdp.h b/drivers/video/msm/mdss/mdss_mdp.h
index 29bc79a..e4f78ad 100644
--- a/drivers/video/msm/mdss/mdss_mdp.h
+++ b/drivers/video/msm/mdss/mdss_mdp.h
@@ -27,7 +27,7 @@
 #define MDSS_MDP_CURSOR_HEIGHT 64
 #define MDSS_MDP_CURSOR_SIZE (MDSS_MDP_CURSOR_WIDTH*MDSS_MDP_CURSOR_WIDTH*4)
 
-#define MDP_CLK_DEFAULT_RATE	37500000
+#define MDP_CLK_DEFAULT_RATE	200000000
 #define PHASE_STEP_SHIFT	21
 #define MAX_MIXER_WIDTH		2048
 #define MAX_MIXER_HEIGHT	2400
@@ -295,6 +295,8 @@
 }
 
 irqreturn_t mdss_mdp_isr(int irq, void *ptr);
+int mdss_iommu_attach(struct mdss_data_type *mdata);
+int mdss_mdp_copy_splash_screen(struct mdss_panel_data *pdata);
 int mdss_mdp_irq_enable(u32 intr_type, u32 intf_num);
 void mdss_mdp_irq_disable(u32 intr_type, u32 intf_num);
 int mdss_mdp_hist_irq_enable(u32 irq);
@@ -303,6 +305,7 @@
 int mdss_mdp_set_intr_callback(u32 intr_type, u32 intf_num,
 			       void (*fnc_ptr)(void *), void *arg);
 
+void mdss_mdp_footswitch_ctrl_splash(int on);
 int mdss_mdp_bus_scale_set_quota(u64 ab_quota, u64 ib_quota);
 void mdss_mdp_set_clk_rate(unsigned long min_clk_rate);
 unsigned long mdss_mdp_get_clk_rate(u32 clk_idx);
@@ -320,6 +323,7 @@
 
 struct mdss_mdp_ctl *mdss_mdp_ctl_init(struct mdss_panel_data *pdata,
 				       struct msm_fb_data_type *mfd);
+int mdss_mdp_ctl_setup(struct mdss_mdp_ctl *ctl);
 int mdss_mdp_ctl_split_display_setup(struct mdss_mdp_ctl *ctl,
 		struct mdss_panel_data *pdata);
 int mdss_mdp_ctl_destroy(struct mdss_mdp_ctl *ctl);
@@ -352,6 +356,8 @@
 int mdss_mdp_pipe_sspp_setup(struct mdss_mdp_pipe *pipe, u32 *op);
 void mdss_mdp_pipe_sspp_term(struct mdss_mdp_pipe *pipe);
 
+int mdss_hw_init(struct mdss_data_type *mdata);
+
 int mdss_mdp_pa_config(struct mdss_mdp_ctl *ctl,
 				struct mdp_pa_cfg_data *config,
 				u32 *copyback);
diff --git a/drivers/video/msm/mdss/mdss_mdp_ctl.c b/drivers/video/msm/mdss/mdss_mdp_ctl.c
index 33af02d..cabb183 100644
--- a/drivers/video/msm/mdss/mdss_mdp_ctl.c
+++ b/drivers/video/msm/mdss/mdss_mdp_ctl.c
@@ -200,8 +200,8 @@
 			max_clk_rate = clk_rate;
 	}
 
-	/* request minimum bandwidth for dsi commands */
-	if ((total_ib_quota == 0) && (ctl->intf_type == MDSS_INTF_DSI))
+	/* request minimum bandwidth to have bus clock on when display is on */
+	if (total_ib_quota == 0)
 		total_ib_quota = SZ_16M >> MDSS_MDP_BUS_FACTOR_SHIFT;
 
 	if (max_clk_rate != ctl->clk_rate) {
@@ -278,7 +278,7 @@
 }
 
 static struct mdss_mdp_mixer *mdss_mdp_mixer_alloc(
-		struct mdss_mdp_ctl *ctl, u32 type)
+		struct mdss_mdp_ctl *ctl, u32 type, int mux)
 {
 	struct mdss_mdp_mixer *mixer = NULL;
 	u32 nmixers_intf;
@@ -295,7 +295,6 @@
 	nmixers_wb = ctl->mdata->nmixers_wb;
 
 	switch (type) {
-
 	case MDSS_MDP_MIXER_TYPE_INTF:
 		mixer_pool = ctl->mdata->mixer_intf;
 		nmixers = nmixers_intf;
@@ -312,6 +311,15 @@
 		break;
 	}
 
+	/* early mdp revision only supports mux of dual pipe on mixers 0 and 1,
+	 * need to ensure that these pipes are readily available by using
+	 * mixer 2 if available and mux is not required */
+	if (!mux && (ctl->mdata->mdp_rev == MDSS_MDP_HW_REV_100) &&
+			(type == MDSS_MDP_MIXER_TYPE_INTF) &&
+			(nmixers >= MDSS_MDP_INTF_LAYERMIXER2) &&
+			(mixer_pool[MDSS_MDP_INTF_LAYERMIXER2].ref_cnt == 0))
+		mixer_pool += MDSS_MDP_INTF_LAYERMIXER2;
+
 	for (i = 0; i < nmixers; i++) {
 		mixer = mixer_pool + i;
 		if (mixer->ref_cnt == 0) {
@@ -356,7 +364,7 @@
 	if (!ctl)
 		return NULL;
 
-	mixer = mdss_mdp_mixer_alloc(ctl, MDSS_MDP_MIXER_TYPE_WRITEBACK);
+	mixer = mdss_mdp_mixer_alloc(ctl, MDSS_MDP_MIXER_TYPE_WRITEBACK, false);
 	if (!mixer)
 		goto error;
 
@@ -435,7 +443,7 @@
 	return NULL;
 }
 
-static int mdss_mdp_ctl_setup(struct mdss_mdp_ctl *ctl)
+int mdss_mdp_ctl_setup(struct mdss_mdp_ctl *ctl)
 {
 	struct mdss_mdp_ctl *split_ctl;
 	u32 width, height;
@@ -461,7 +469,8 @@
 
 	if (!ctl->mixer_left) {
 		ctl->mixer_left =
-			mdss_mdp_mixer_alloc(ctl, MDSS_MDP_MIXER_TYPE_INTF);
+			mdss_mdp_mixer_alloc(ctl, MDSS_MDP_MIXER_TYPE_INTF,
+					(width > MAX_MIXER_WIDTH));
 		if (!ctl->mixer_left) {
 			pr_err("unable to allocate layer mixer\n");
 			return -ENOMEM;
@@ -482,7 +491,7 @@
 	if (width < ctl->width) {
 		if (ctl->mixer_right == NULL) {
 			ctl->mixer_right = mdss_mdp_mixer_alloc(ctl,
-					MDSS_MDP_MIXER_TYPE_INTF);
+					MDSS_MDP_MIXER_TYPE_INTF, true);
 			if (!ctl->mixer_right) {
 				pr_err("unable to allocate right mixer\n");
 				if (ctl->mixer_left)
@@ -617,14 +626,15 @@
 	sctl->width = pdata->panel_info.xres;
 	sctl->height = pdata->panel_info.yres;
 
-	ctl->mixer_left = mdss_mdp_mixer_alloc(ctl, MDSS_MDP_MIXER_TYPE_INTF);
+	ctl->mixer_left = mdss_mdp_mixer_alloc(ctl, MDSS_MDP_MIXER_TYPE_INTF,
+			false);
 	if (!ctl->mixer_left) {
 		pr_err("unable to allocate layer mixer\n");
 		mdss_mdp_ctl_destroy(sctl);
 		return -ENOMEM;
 	}
 
-	mixer = mdss_mdp_mixer_alloc(sctl, MDSS_MDP_MIXER_TYPE_INTF);
+	mixer = mdss_mdp_mixer_alloc(sctl, MDSS_MDP_MIXER_TYPE_INTF, false);
 	if (!mixer) {
 		pr_err("unable to allocate layer mixer\n");
 		mdss_mdp_ctl_destroy(sctl);
@@ -756,14 +766,15 @@
 	struct mdss_mdp_ctl *sctl;
 	int ret = 0;
 
+	if (ctl->power_on) {
+		pr_debug("%s:%d already on!\n", __func__, __LINE__);
+		return 0;
+	}
+
 	ret = mdss_mdp_ctl_setup(ctl);
 	if (ret)
 		return ret;
 
-	if (ctl->power_on) {
-		WARN(1, "already on!\n");
-		return 0;
-	}
 
 	sctl = mdss_mdp_get_split_ctl(ctl);
 
@@ -813,7 +824,7 @@
 	int ret = 0;
 
 	if (!ctl->power_on) {
-		WARN(1, "already off!\n");
+		pr_debug("%s %d already off!\n", __func__, __LINE__);
 		return 0;
 	}
 
@@ -866,7 +877,7 @@
 	struct mdss_mdp_pipe *pipe;
 	u32 off, blend_op, blend_stage;
 	u32 mixercfg = 0, blend_color_out = 0, bgalpha = 0;
-	int stage;
+	int stage, secure = 0;
 
 	if (!mixer)
 		return -ENODEV;
@@ -880,6 +891,7 @@
 		mixercfg = 1 << (3 * pipe->num);
 		if (pipe->src_fmt->alpha_enable)
 			bgalpha = 1;
+		secure = pipe->flags & MDP_SECURE_OVERLAY_SESSION;
 	}
 
 	for (stage = MDSS_MDP_STAGE_0; stage < MDSS_MDP_MAX_STAGE; stage++) {
@@ -897,7 +909,8 @@
 
 		if (pipe->is_fg) {
 			bgalpha = 0;
-			mixercfg = MDSS_MDP_LM_BORDER_COLOR;
+			if (!secure)
+				mixercfg = MDSS_MDP_LM_BORDER_COLOR;
 
 			blend_op = (MDSS_MDP_BLEND_FG_ALPHA_FG_CONST |
 				    MDSS_MDP_BLEND_BG_ALPHA_BG_CONST);
diff --git a/drivers/video/msm/mdss/mdss_mdp_hwio.h b/drivers/video/msm/mdss/mdss_mdp_hwio.h
index 1c5c4b8..d4ffaff 100644
--- a/drivers/video/msm/mdss/mdss_mdp_hwio.h
+++ b/drivers/video/msm/mdss/mdss_mdp_hwio.h
@@ -221,6 +221,7 @@
 #define MDSS_MDP_SCALE_FILTER_CA		0x3
 #define MDSS_MDP_SCALEY_EN			BIT(1)
 #define MDSS_MDP_SCALEX_EN			BIT(0)
+#define MDSS_MDP_FMT_SOLID_FILL			0x4037FF
 
 #define MDSS_MDP_NUM_REG_MIXERS 3
 #define MDSS_MDP_NUM_WB_MIXERS 2
diff --git a/drivers/video/msm/mdss/mdss_mdp_overlay.c b/drivers/video/msm/mdss/mdss_mdp_overlay.c
index 058a46d..daa2499 100644
--- a/drivers/video/msm/mdss/mdss_mdp_overlay.c
+++ b/drivers/video/msm/mdss/mdss_mdp_overlay.c
@@ -20,9 +20,11 @@
 #include <linux/module.h>
 #include <linux/pm_runtime.h>
 #include <linux/uaccess.h>
+#include <linux/delay.h>
 
 #include <mach/iommu_domains.h>
 
+#include "mdss.h"
 #include "mdss_fb.h"
 #include "mdss_mdp.h"
 #include "mdss_mdp_rotator.h"
@@ -498,6 +500,125 @@
 	return 0;
 }
 
+int mdss_mdp_copy_splash_screen(struct mdss_panel_data *pdata)
+{
+	void *virt = NULL;
+	unsigned long bl_fb_addr = 0;
+	unsigned long *bl_fb_addr_va;
+	unsigned long  pipe_addr, pipe_src_size;
+	u32 height, width, rgb_size, bpp;
+	size_t size;
+	static struct ion_handle *ihdl;
+	struct ion_client *iclient = mdss_get_ionclient();
+	static ion_phys_addr_t phys;
+
+	pipe_addr = MDSS_MDP_REG_SSPP_OFFSET(3) +
+		MDSS_MDP_REG_SSPP_SRC0_ADDR;
+	pipe_src_size =
+		MDSS_MDP_REG_SSPP_OFFSET(3) + MDSS_MDP_REG_SSPP_SRC_SIZE;
+
+	bpp        = 3;
+	rgb_size   = MDSS_MDP_REG_READ(pipe_src_size);
+	bl_fb_addr = MDSS_MDP_REG_READ(pipe_addr);
+
+	height = (rgb_size >> 16) & 0xffff;
+	width  = rgb_size & 0xffff;
+	size = PAGE_ALIGN(height * width * bpp);
+	pr_debug("%s:%d splash_height=%d splash_width=%d Buffer size=%zu\n",
+			__func__, __LINE__, height, width, size);
+
+	ihdl = ion_alloc(iclient, size, SZ_1M,
+			ION_HEAP(ION_QSECOM_HEAP_ID), 0);
+	if (IS_ERR_OR_NULL(ihdl)) {
+		pr_err("unable to alloc fbmem from ion (%p)\n", ihdl);
+		return -ENOMEM;
+	}
+
+	pdata->panel_info.splash_ihdl = ihdl;
+
+	virt = ion_map_kernel(iclient, ihdl);
+	ion_phys(iclient, ihdl, &phys, &size);
+
+	pr_debug("%s %d Allocating %zu bytes at 0x%lx (%lx phys)\n",
+			__func__, __LINE__, size,
+			(unsigned long int)virt, phys);
+
+	bl_fb_addr_va = (unsigned long *)ioremap(bl_fb_addr, size);
+
+	memcpy(virt, bl_fb_addr_va, size);
+
+	MDSS_MDP_REG_WRITE(pipe_addr, phys);
+	MDSS_MDP_REG_WRITE(MDSS_MDP_REG_CTL_FLUSH + MDSS_MDP_REG_CTL_OFFSET(0),
+			0x48);
+
+	return 0;
+
+}
+
+int mdss_mdp_reconfigure_splash_done(struct mdss_mdp_ctl *ctl)
+{
+	struct ion_client *iclient = mdss_get_ionclient();
+	struct mdss_panel_data *pdata;
+	int ret = 0, off;
+
+	off = 0;
+
+	pdata = ctl->panel_data;
+
+	pdata->panel_info.cont_splash_enabled = 0;
+
+	ion_free(iclient, pdata->panel_info.splash_ihdl);
+
+	mdss_mdp_ctl_write(ctl, 0, MDSS_MDP_LM_BORDER_COLOR);
+	off = MDSS_MDP_REG_INTF_OFFSET(ctl->intf_num);
+
+	/* wait for 1 VSYNC for the pipe to be unstaged */
+	msleep(20);
+	MDSS_MDP_REG_WRITE(off + MDSS_MDP_REG_INTF_TIMING_ENGINE_EN, 0);
+	ret = mdss_mdp_ctl_intf_event(ctl, MDSS_EVENT_CONT_SPLASH_FINISH,
+			NULL);
+	mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF, false);
+	mdss_mdp_footswitch_ctrl_splash(0);
+	return ret;
+}
+
+static int mdss_mdp_overlay_start(struct msm_fb_data_type *mfd)
+{
+	int rc;
+
+	if (mfd->ctl->power_on)
+		return 0;
+
+	pr_debug("starting fb%d overlay\n", mfd->index);
+
+	rc = pm_runtime_get_sync(&mfd->pdev->dev);
+	if (IS_ERR_VALUE(rc)) {
+		pr_err("unable to resume with pm_runtime_get_sync rc=%d\n", rc);
+		return rc;
+	}
+
+	if (mfd->panel_info->cont_splash_enabled)
+		mdss_mdp_reconfigure_splash_done(mfd->ctl);
+
+	if (!is_mdss_iommu_attached()) {
+		mdss_iommu_attach(mdss_res);
+		mdss_hw_init(mdss_res);
+	}
+
+	rc = mdss_mdp_ctl_start(mfd->ctl);
+	if (rc == 0) {
+		atomic_inc(&ov_active_panels);
+	} else {
+		pr_err("overlay start failed.\n");
+		mdss_mdp_ctl_destroy(mfd->ctl);
+		mfd->ctl = NULL;
+
+		pm_runtime_put(&mfd->pdev->dev);
+	}
+
+	return rc;
+}
+
 int mdss_mdp_overlay_kickoff(struct mdss_mdp_ctl *ctl)
 {
 	struct msm_fb_data_type *mfd = ctl->mfd;
@@ -507,13 +628,23 @@
 	mutex_lock(&mfd->ov_lock);
 	mutex_lock(&mfd->lock);
 	list_for_each_entry(pipe, &mfd->pipes_used, used_list) {
-		if (pipe->params_changed || pipe->back_buf.num_planes) {
-			ret = mdss_mdp_pipe_queue_data(pipe, &pipe->back_buf);
-			if (IS_ERR_VALUE(ret)) {
-				pr_warn("Unable to queue data for pnum=%d\n",
-						pipe->num);
-				mdss_mdp_overlay_free_buf(&pipe->back_buf);
-			}
+		struct mdss_mdp_data *buf;
+		if (pipe->back_buf.num_planes) {
+			buf = &pipe->back_buf;
+		} else if (!pipe->params_changed) {
+			continue;
+		} else if (pipe->front_buf.num_planes) {
+			buf = &pipe->front_buf;
+		} else {
+			pr_warn("pipe queue without buffer\n");
+			buf = NULL;
+		}
+
+		ret = mdss_mdp_pipe_queue_data(pipe, buf);
+		if (IS_ERR_VALUE(ret)) {
+			pr_warn("Unable to queue data for pnum=%d\n",
+					pipe->num);
+			mdss_mdp_overlay_free_buf(buf);
 		}
 	}
 
@@ -743,6 +874,12 @@
 		return -EPERM;
 	}
 
+	ret = mdss_mdp_overlay_start(mfd);
+	if (ret) {
+		pr_err("unable to start overlay %d (%d)\n", mfd->index, ret);
+		return ret;
+	}
+
 	if (req->id & MDSS_MDP_ROT_SESSION_MASK) {
 		ret = mdss_mdp_overlay_rotate(mfd, req);
 	} else if (req->id == BORDERFILL_NDX) {
@@ -880,6 +1017,12 @@
 		return;
 	}
 
+	ret = mdss_mdp_overlay_start(mfd);
+	if (ret) {
+		pr_err("unable to start overlay %d (%d)\n", mfd->index, ret);
+		return;
+	}
+
 	if (is_mdss_iommu_attached())
 		data.p[0].addr = mfd->iova;
 	else
@@ -975,10 +1118,6 @@
 
 	spin_lock_irqsave(&mfd->vsync_lock, flags);
 	INIT_COMPLETION(mfd->vsync_comp);
-	if (en && ctl->play_cnt == 0) {
-		mfd->vsync_time = ktime_get();
-		complete(&mfd->vsync_comp);
-	}
 	spin_unlock_irqrestore(&mfd->vsync_lock, flags);
 
 	mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_ON, false);
@@ -1007,11 +1146,6 @@
 		return 0;
 
 	timeout = msecs_to_jiffies(VSYNC_PERIOD * 5);
-	if (mfd->ctl->play_cnt == 0) {
-		pr_debug("timegen enable still pending on fb%d\n", mfd->index);
-		timeout <<= 5;
-	}
-
 	ret = wait_for_completion_interruptible_timeout(&mfd->vsync_comp,
 			timeout);
 	if (ret <= 0) {
@@ -1286,7 +1420,7 @@
 
 static int mdss_mdp_overlay_on(struct msm_fb_data_type *mfd)
 {
-	int rc;
+	int rc = 0;
 
 	if (!mfd)
 		return -ENODEV;
@@ -1322,21 +1456,19 @@
 		mfd->ctl = ctl;
 	}
 
-	pm_runtime_get_sync(&mfd->pdev->dev);
-
-	rc = mdss_mdp_ctl_start(mfd->ctl);
-	if (rc == 0) {
-		atomic_inc(&ov_active_panels);
-
-		if (mfd->vsync_pending) {
-			mfd->vsync_pending = 0;
-			mdss_mdp_overlay_vsync_ctrl(mfd, mfd->vsync_pending);
-		}
+	if (!mfd->panel_info->cont_splash_enabled) {
+		rc = mdss_mdp_overlay_start(mfd);
+		if (!IS_ERR_VALUE(rc))
+			rc = mdss_mdp_overlay_kickoff(mfd->ctl);
 	} else {
-		mdss_mdp_ctl_destroy(mfd->ctl);
-		mfd->ctl = NULL;
+		rc = mdss_mdp_ctl_setup(mfd->ctl);
+		if (rc)
+			return rc;
+	}
 
-		pm_runtime_put(&mfd->pdev->dev);
+	if (!IS_ERR_VALUE(rc) && mfd->vsync_pending) {
+		mfd->vsync_pending = 0;
+		mdss_mdp_overlay_vsync_ctrl(mfd, mfd->vsync_pending);
 	}
 
 	return rc;
@@ -1357,6 +1489,9 @@
 		return -ENODEV;
 	}
 
+	if (!mfd->ctl->power_on)
+		return 0;
+
 	mdss_mdp_overlay_release_all(mfd);
 
 	rc = mdss_mdp_ctl_stop(mfd->ctl);
@@ -1370,7 +1505,9 @@
 		if (atomic_dec_return(&ov_active_panels) == 0)
 			mdss_mdp_rotator_release_all();
 
-		pm_runtime_put(&mfd->pdev->dev);
+		rc = pm_runtime_put(&mfd->pdev->dev);
+		if (rc)
+			pr_err("unable to suspend w/pm_runtime_put (%d)\n", rc);
 	}
 
 	return rc;
diff --git a/drivers/video/msm/mdss/mdss_mdp_pipe.c b/drivers/video/msm/mdss/mdss_mdp_pipe.c
index 0a52561..8c88646 100644
--- a/drivers/video/msm/mdss/mdss_mdp_pipe.c
+++ b/drivers/video/msm/mdss/mdss_mdp_pipe.c
@@ -98,6 +98,9 @@
 		num_blks = DIV_ROUND_UP(2 * ps.ystride[i],
 			mdss_res->smp_mb_size);
 
+		if (mdss_res->mdp_rev == MDSS_MDP_HW_REV_100)
+			num_blks = roundup_pow_of_two(num_blks);
+
 		pr_debug("reserving %d mmb for pnum=%d plane=%d\n",
 				num_blks, pipe->num, i);
 		reserved = mdss_mdp_smp_mmb_reserve(&pipe->smp[i], num_blks);
@@ -709,6 +712,28 @@
 	return 0;
 }
 
+static int mdss_mdp_pipe_solidfill_setup(struct mdss_mdp_pipe *pipe)
+{
+	int ret;
+	u32 secure, format;
+
+	pr_debug("solid fill setup on pnum=%d\n", pipe->num);
+
+	ret = mdss_mdp_image_setup(pipe);
+	if (ret) {
+		pr_err("image setup error for pnum=%d\n", pipe->num);
+		return ret;
+	}
+
+	format = MDSS_MDP_FMT_SOLID_FILL;
+	secure = (pipe->flags & MDP_SECURE_OVERLAY_SESSION ? 0xF : 0x0);
+
+	mdss_mdp_pipe_write(pipe, MDSS_MDP_REG_SSPP_SRC_FORMAT, format);
+	mdss_mdp_pipe_write(pipe, MDSS_MDP_REG_SSPP_SRC_ADDR_SW_STATUS, secure);
+
+	return 0;
+}
+
 int mdss_mdp_pipe_queue_data(struct mdss_mdp_pipe *pipe,
 			     struct mdss_mdp_data *src_data)
 {
@@ -731,6 +756,11 @@
 	mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_ON, false);
 
 	params_changed = pipe->params_changed;
+	if (src_data == NULL) {
+		mdss_mdp_pipe_solidfill_setup(pipe);
+		goto update_nobuf;
+	}
+
 	if (params_changed) {
 		pipe->params_changed = 0;
 
@@ -768,6 +798,7 @@
 		goto done;
 	}
 
+update_nobuf:
 	mdss_mdp_mixer_pipe_update(pipe, params_changed);
 
 	pipe->play_cnt++;
diff --git a/drivers/video/msm/mdss/mdss_mdp_wb.c b/drivers/video/msm/mdss/mdss_mdp_wb.c
index 23efcb8..d24a7c9 100644
--- a/drivers/video/msm/mdss/mdss_mdp_wb.c
+++ b/drivers/video/msm/mdss/mdss_mdp_wb.c
@@ -26,7 +26,6 @@
 #include "mdss_mdp.h"
 #include "mdss_fb.h"
 
-#define DEBUG_WRITEBACK
 
 enum mdss_mdp_wb_state {
 	WB_OPEN,
@@ -43,6 +42,8 @@
 	struct list_head register_queue;
 	wait_queue_head_t wait_q;
 	u32 state;
+	int is_secure;
+	struct mdss_mdp_pipe *secure_pipe;
 };
 
 enum mdss_mdp_wb_node_state {
@@ -121,6 +122,72 @@
 }
 #endif
 
+int mdss_mdp_wb_set_secure(struct msm_fb_data_type *mfd, int enable)
+{
+	struct mdss_mdp_wb *wb;
+	struct mdss_mdp_pipe *pipe;
+	struct mdss_mdp_mixer *mixer;
+
+	pr_debug("setting secure=%d\n", enable);
+
+	wb = mfd->wb;
+	if (wb == NULL) {
+		pr_err("Invalid writeback session\n");
+		return -ENODEV;
+	}
+
+	wb->is_secure = enable;
+	pipe = wb->secure_pipe;
+
+	if (!enable) {
+		if (pipe) {
+			/* unset pipe */
+			mdss_mdp_mixer_pipe_unstage(pipe);
+			mdss_mdp_pipe_destroy(pipe);
+			wb->secure_pipe = NULL;
+		}
+		return 0;
+	}
+
+	mixer = mdss_mdp_mixer_get(mfd->ctl, MDSS_MDP_MIXER_MUX_DEFAULT);
+	if (!mixer) {
+		pr_err("Unable to find mixer for wb\n");
+		return -ENOENT;
+	}
+
+	if (!pipe) {
+		pipe = mdss_mdp_pipe_alloc(mixer, MDSS_MDP_PIPE_TYPE_RGB);
+		if (!pipe)
+			pipe = mdss_mdp_pipe_alloc(mixer,
+					MDSS_MDP_PIPE_TYPE_VIG);
+		if (!pipe) {
+			pr_err("Unable to get pipe to set secure session\n");
+			return -ENOMEM;
+		}
+
+		pipe->src_fmt = mdss_mdp_get_format_params(MDP_RGBA_8888);
+
+		pipe->mfd = mfd;
+		pipe->mixer_stage = MDSS_MDP_STAGE_BASE;
+		wb->secure_pipe = pipe;
+	}
+
+	pipe->img_height = mixer->height;
+	pipe->img_width = mixer->width;
+	pipe->src.x = 0;
+	pipe->src.y = 0;
+	pipe->src.w = pipe->img_width;
+	pipe->src.h = pipe->img_height;
+	pipe->dst = pipe->src;
+
+	pipe->flags = (enable ? MDP_SECURE_OVERLAY_SESSION : 0);
+	pipe->params_changed++;
+
+	pr_debug("setting secure pipe=%d flags=%x\n", pipe->num, pipe->flags);
+
+	return mdss_mdp_pipe_queue_data(pipe, NULL);
+}
+
 static int mdss_mdp_wb_init(struct msm_fb_data_type *mfd)
 {
 	struct mdss_mdp_wb *wb;
@@ -173,6 +240,10 @@
 			kfree(node);
 		}
 	}
+
+	wb->is_secure = false;
+	if (wb->secure_pipe)
+		mdss_mdp_pipe_destroy(wb->secure_pipe);
 	mutex_unlock(&wb->lock);
 
 	mfd->wb = NULL;
@@ -257,6 +328,8 @@
 	buf = &node->buf_data.p[0];
 	buf->addr = (u32) (data->iova + data->offset);
 	buf->len = UINT_MAX; /* trusted source */
+	if (wb->is_secure)
+		buf->flags |= MDP_SECURE_OVERLAY_SESSION;
 	ret = mdss_mdp_wb_register_node(wb, node);
 	if (IS_ERR_VALUE(ret)) {
 		pr_err("error registering wb node\n");
@@ -284,6 +357,8 @@
 
 	node->buf_data.num_planes = 1;
 	buf = &node->buf_data.p[0];
+	if (wb->is_secure)
+		buf->flags |= MDP_SECURE_OVERLAY_SESSION;
 	ret = mdss_mdp_get_img(data, buf);
 	if (IS_ERR_VALUE(ret)) {
 		pr_err("error getting buffer info\n");
@@ -419,6 +494,9 @@
 	wb = ctl->mfd->wb;
 	if (wb) {
 		mutex_lock(&wb->lock);
+		/* in case of reinit of control path need to reset secure */
+		if (ctl->play_cnt == 0)
+			mdss_mdp_wb_set_secure(ctl->mfd, wb->is_secure);
 		if (!list_empty(&wb->free_queue) && wb->state != WB_STOPING &&
 		    wb->state != WB_STOP) {
 			node = list_first_entry(&wb->free_queue,
@@ -438,7 +516,8 @@
 
 	if (wb_args.data == NULL) {
 		pr_err("unable to get writeback buf ctl=%d\n", ctl->num);
-		ret = -ENOMEM;
+		/* drop buffer but don't return error */
+		ret = 0;
 		goto kickoff_fail;
 	}
 
@@ -568,8 +647,31 @@
 }
 EXPORT_SYMBOL(msm_fb_writeback_terminate);
 
-int msm_fb_get_iommu_domain(void)
+int msm_fb_get_iommu_domain(struct fb_info *info, int domain)
 {
-	return mdss_get_iommu_domain(MDSS_IOMMU_DOMAIN_UNSECURE);
+	int mdss_domain;
+	switch (domain) {
+	case MDP_IOMMU_DOMAIN_CP:
+		mdss_domain = MDSS_IOMMU_DOMAIN_SECURE;
+		break;
+	case MDP_IOMMU_DOMAIN_NS:
+		mdss_domain = MDSS_IOMMU_DOMAIN_UNSECURE;
+		break;
+	default:
+		pr_err("Invalid mdp iommu domain (%d)\n", domain);
+		return -EINVAL;
+	}
+	return mdss_get_iommu_domain(mdss_domain);
 }
 EXPORT_SYMBOL(msm_fb_get_iommu_domain);
+
+int msm_fb_writeback_set_secure(struct fb_info *info, int enable)
+{
+	struct msm_fb_data_type *mfd = (struct msm_fb_data_type *) info->par;
+
+	if (!mfd)
+		return -ENODEV;
+
+	return mdss_mdp_wb_set_secure(mfd, enable);
+}
+EXPORT_SYMBOL(msm_fb_writeback_set_secure);
diff --git a/drivers/video/msm/mdss/mdss_panel.h b/drivers/video/msm/mdss/mdss_panel.h
index 40131eb..31fb2e7 100644
--- a/drivers/video/msm/mdss/mdss_panel.h
+++ b/drivers/video/msm/mdss/mdss_panel.h
@@ -65,6 +65,7 @@
 	MDSS_EVENT_SUSPEND,
 	MDSS_EVENT_RESUME,
 	MDSS_EVENT_CHECK_PARAMS,
+	MDSS_EVENT_CONT_SPLASH_FINISH,
 	MDSS_EVENT_FB_REGISTERED,
 };
 
@@ -183,6 +184,10 @@
 	u32 out_format;
 	u32 vic; /* video identification code */
 
+	u32 cont_splash_enabled;
+	struct ion_handle *splash_ihdl;
+	u32 panel_power_on;
+
 	struct lcd_panel_info lcd;
 	struct lcdc_panel_info lcdc;
 	struct mipi_panel_info mipi;
diff --git a/drivers/video/msm/msm_fb.c b/drivers/video/msm/msm_fb.c
index 0715b0b..b96e093 100644
--- a/drivers/video/msm/msm_fb.c
+++ b/drivers/video/msm/msm_fb.c
@@ -1181,6 +1181,26 @@
 		bpp = 4;
 		break;
 
+	case MDP_BGRA_8888:
+		fix->type = FB_TYPE_PACKED_PIXELS;
+		fix->xpanstep = 1;
+		fix->ypanstep = 1;
+		var->vmode = FB_VMODE_NONINTERLACED;
+		var->blue.offset = 0;
+		var->green.offset = 8;
+		var->red.offset = 16;
+		var->blue.length = 8;
+		var->green.length = 8;
+		var->red.length = 8;
+		var->blue.msb_right = 0;
+		var->green.msb_right = 0;
+		var->red.msb_right = 0;
+		var->transp.offset = 24;
+		var->transp.length = 8;
+		bpp = 4;
+		break;
+
+
 	case MDP_YCRYCB_H2V1:
 		/* ToDo: need to check TV-Out YUV422i framebuffer format */
 		/*       we might need to create new type define */
@@ -1900,7 +1920,9 @@
 		break;
 
 	case 32:
-		if (var->transp.offset == 24)
+		if ((var->transp.offset == 24) && (var->blue.offset == 0))
+			mfd->fb_imgType = MDP_BGRA_8888;
+		else if (var->transp.offset == 24)
 			mfd->fb_imgType = MDP_ARGB_8888;
 		else
 			mfd->fb_imgType = MDP_RGBA_8888;
@@ -3279,6 +3301,10 @@
 		ret = mdp4_update_base_blend(mfd,
 						&metadata_ptr->data.blend_cfg);
 		break;
+	case metadata_op_wb_format:
+		ret = mdp4_update_writeback_format(mfd,
+					&metadata_ptr->data.mixer_cfg);
+		break;
 #endif
 	default:
 		pr_warn("Unsupported request to MDP META IOCTL.\n");
diff --git a/include/asm-generic/dma-contiguous.h b/include/asm-generic/dma-contiguous.h
index c544356..9071ef1 100644
--- a/include/asm-generic/dma-contiguous.h
+++ b/include/asm-generic/dma-contiguous.h
@@ -11,15 +11,13 @@
 {
 	if (dev && dev->cma_area)
 		return dev->cma_area;
-	return dma_contiguous_default_area;
+	return dma_contiguous_def_area;
 }
 
 static inline void dev_set_cma_area(struct device *dev, struct cma *cma)
 {
 	if (dev)
 		dev->cma_area = cma;
-	if (!dev || !dma_contiguous_default_area)
-		dma_contiguous_default_area = cma;
 }
 
 #endif
diff --git a/include/linux/Kbuild b/include/linux/Kbuild
index 06772d9..f6ca334 100644
--- a/include/linux/Kbuild
+++ b/include/linux/Kbuild
@@ -385,6 +385,7 @@
 header-y += types.h
 header-y += udf_fs_i.h
 header-y += udp.h
+header-y += uhid.h
 header-y += uinput.h
 header-y += uio.h
 header-y += ultrasound.h
@@ -450,3 +451,4 @@
 header-y += ci-bridge-spi.h
 header-y += msm_audio_amrwbplus.h
 header-y += avtimer.h
+header-y += msm_ipa.h
diff --git a/include/linux/bif/consumer.h b/include/linux/bif/consumer.h
new file mode 100644
index 0000000..e4c190e
--- /dev/null
+++ b/include/linux/bif/consumer.h
@@ -0,0 +1,613 @@
+/* Copyright (c) 2013, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _LINUX_BIF_CONSUMER_H_
+#define _LINUX_BIF_CONSUMER_H_
+
+#include <linux/bitops.h>
+#include <linux/kernel.h>
+#include <linux/notifier.h>
+
+#define BIF_DEVICE_ID_BYTE_LENGTH	8
+#define BIF_UNIQUE_ID_BYTE_LENGTH	10
+#define BIF_UNIQUE_ID_BIT_LENGTH	80
+
+#define BIF_PRIMARY_SLAVE_DEV_ADR	0x01
+
+/**
+ * enum bif_transaction - BIF master bus transaction types
+ * %BIF_TRANS_WD:	Write data
+ * %BIF_TRANS_ERA:	Extended register address
+ * %BIF_TRANS_WRA:	Write register address
+ * %BIF_TRANS_RRA:	Read register address
+ * %BIF_TRANS_BC:	Bus command
+ * %BIF_TRANS_EDA:	Extended device address
+ * %BIF_TRANS_SDA:	Slave device address
+ *
+ * These values correspond to BIF word bits: BCF, bit 9, bit 8.
+ * BCF_n bit is inserted automatically.
+ */
+enum bif_transaction {
+	BIF_TRANS_WD	= 0x00,
+	BIF_TRANS_ERA	= 0x01,
+	BIF_TRANS_WRA	= 0x02,
+	BIF_TRANS_RRA	= 0x03,
+	BIF_TRANS_BC	= 0x04,
+	BIF_TRANS_EDA	= 0x05,
+	BIF_TRANS_SDA	= 0x06,
+};
+
+/* BIF slave response components */
+#define BIF_SLAVE_RD_ACK		0x200
+#define BIF_SLAVE_RD_EOT		0x100
+#define BIF_SLAVE_RD_DATA		0x0FF
+#define BIF_SLAVE_RD_ERR		0x0FF
+#define BIF_SLAVE_TACK_ACK		0x200
+#define BIF_SLAVE_TACK_WCNT		0x0FF
+#define BIF_SLAVE_TACK_ERR		0x0FF
+
+/**
+ * enum bif_bus_command - MIPI defined bus commands to use in BC transaction
+ * %BIF_CMD_BRES:	Bus reset of all slaves
+ * %BIF_CMD_PDWN:	Put all slaves into power down mode
+ * %BIF_CMD_STBY:	Put all slaves into standby mode
+ * %BIF_CMD_EINT:	Enable interrupts for all slaves
+ * %BIF_CMD_ISTS:	Poll interrupt status for all slaves.  Expects BQ
+ *			response if any slave has a pending interrupt.
+ * %BIF_CMD_RBL:	Specify the burst read length for the next read
+ *			transaction.  Bits 3 to 0 should also be ORed on in
+ *			order to specify the number of bytes to read.
+ * %BIF_CMD_RBE:	Specify the extended burst read length for the next read
+ *			transaction.  Bits 3 to 0 should also be ORed on in
+ *			order to specify the number of bytes to read.  The burst
+ *			read length for RBEy and RBLx = 16 * y + x.
+ * %BIF_CMD_DASM:	Device activation stick mode.  This keeps a slave
+ *			selected if it would otherwise become unselected by the
+ *			next transaction.
+ * %BIF_CMD_DISS:	UID search start
+ * %BIF_CMD_DILC:	UID length check.  Expects BQ response if all 80 UID
+ *			bits for a given slave have been entered.
+ * %BIF_CMD_DIE0:	UID search enter 0
+ * %BIF_CMD_DIE1:	UID search enter 1
+ * %BIF_CMD_DIP0:	UID search probe 0
+ * %BIF_CMD_DIP1:	UID search probe 1
+ * %BIF_CMD_DRES:	Device reset of selected slaves
+ * %BIF_CMD_TQ:		Transaction query; expects TACK response
+ * %BIF_CMD_AIO:	Address increment off for the next transaction
+ *
+ * These values correspond to BIF word bits 7 to 0.
+ */
+enum bif_bus_command {
+	BIF_CMD_BRES	= 0x00,
+	BIF_CMD_PDWN	= 0x02,
+	BIF_CMD_STBY	= 0x03,
+	BIF_CMD_EINT	= 0x10,
+	BIF_CMD_ISTS	= 0x11,
+	BIF_CMD_RBL	= 0x20,
+	BIF_CMD_RBE	= 0x30,
+	BIF_CMD_DASM	= 0x40,
+	BIF_CMD_DISS	= 0x80,
+	BIF_CMD_DILC	= 0x81,
+	BIF_CMD_DIE0	= 0x84,
+	BIF_CMD_DIE1	= 0x85,
+	BIF_CMD_DIP0	= 0x86,
+	BIF_CMD_DIP1	= 0x87,
+	BIF_CMD_DRES	= 0xC0,
+	BIF_CMD_TQ	= 0xC2,
+	BIF_CMD_AIO	= 0xC4,
+};
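
A hedged illustration of the RBL/RBE encoding described above (burst read length = 16 * y + x): a consumer could split a requested byte count (up to 255) into the two bus commands roughly as follows. bif_ctrl_raw_transaction() and BIF_TRANS_BC are declared later in this header; the exact command ordering a given controller requires is not mandated here.

	static int bif_request_burst_read_len(struct bif_ctrl *ctrl,
					      unsigned int len)
	{
		int rc = 0;

		/* RBEy carries len / 16, RBLx carries len % 16 */
		if (len / 16)
			rc = bif_ctrl_raw_transaction(ctrl, BIF_TRANS_BC,
						      BIF_CMD_RBE | (len / 16));
		if (!rc)
			rc = bif_ctrl_raw_transaction(ctrl, BIF_TRANS_BC,
						      BIF_CMD_RBL | (len % 16));
		return rc;
	}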
+
+/**
+ * struct bif_ddb_l1_data - MIPI defined L1 DDB data structure
+ * @revision:		DDB version; should be 0x10 for DDB v1.0
+ * @level:		DDB level support; should be 0x03 for DDB L1 and L2
+ * @device_class:	MIPI device class; should be 0x0800
+ * @manufacturer_id:	Manufacturer ID number allocated by MIPI
+ * @product_id:		Manufacturer specified product ID number
+ * @length:		Size of L2 function directory in bytes
+ */
+struct bif_ddb_l1_data {
+	u8	revision;
+	u8	level;
+	u16	device_class;
+	u16	manufacturer_id;
+	u16	product_id;
+	u16	length;
+};
+
+/**
+ * struct bif_ddb_l2_data - MIPI defined L2 DDB function data structure
+ * @function_type:	Defines the type of the function.  The type may be
+ *			either MIPI or manufacturer defined.
+ * @function_version:	Defines the version of the function.  The version may
+ *			be either MIPI or manufacturer defined.
+ * @function_pointer:	Address in BIF slave memory where the register map for
+ *			the function begins.
+ */
+struct bif_ddb_l2_data {
+	u8	function_type;
+	u8	function_version;
+	u16	function_pointer;
+};
+
+/**
+ * enum bif_mipi_function_type - MIPI defined DDB L2 function types
+ * %BIF_FUNC_PROTOCOL:		Protocol function which provides access to core
+ *				BIF communication features.
+ * %BIF_FUNC_SLAVE_CONTROL:	Slave control function which provides control
+ *				for BIF slave interrupts and tasks.
+ * %BIF_FUNC_TEMPERATURE:	Temperature sensor function which provides a
+ *				means to accurately read the battery temperature
+ *				in a single-shot or periodic fashion.
+ * %BIF_FUNC_NVM:		Non-volatile memory function which provides a
+ *				means to store data onto a BIF slave that is
+ *				non-volatile.  Secondary slave objects are also
+ *				found through the NVM function.
+ * %BIF_FUNC_AUTHENTICATION:	Authentication function which provides a means
+ *				to authenticate batteries.  This function does
+ *				not have a MIPI defined implementation.  Instead
+ *				all aspects of the authentication function are
+ *				left to the discretion of the manufacturer.
+ */
+enum bif_mipi_function_type {
+	BIF_FUNC_PROTOCOL	= 0x01,
+	BIF_FUNC_SLAVE_CONTROL	= 0x02,
+	BIF_FUNC_TEMPERATURE	= 0x03,
+	BIF_FUNC_NVM		= 0x04,
+	BIF_FUNC_AUTHENTICATION	= 0x05,
+};
+
+#define BIF_DDB_L1_BASE_ADDR	0x0000
+#define BIF_DDB_L2_BASE_ADDR	0x000A
+
+/**
+ * enum bif_slave_error_code - MIPI defined BIF slave error codes
+ * %BIF_ERR_NONE:		No error occurred
+ * %BIF_ERR_GENERAL:		An unenumerated error occurred
+ * %BIF_ERR_PARITY:		A Hamming-15 parity check failed for a word
+ *				sent on the bus
+ * %BIF_ERR_INVERSION:		More than 8 bits in a word were 1
+ * %BIF_ERR_BAD_LENGTH:		Word had more or less than 17 bits
+ * %BIF_ERR_TIMING:		Bit timing was violated in a word
+ * %BIF_ERR_UNKNOWN_CMD:	Bus command was unknown to the slave
+ * %BIF_ERR_CMD_SEQ:		Commands with ordering dependency were not
+ *				sent in the right order
+ * %BIF_ERR_BUS_COLLISION:	BCL was already low at the beginning of a new
+ *				transaction
+ * %BIF_ERR_SLAVE_BUSY:		Slave is busy and cannot respond
+ * %BIF_ERR_FATAL:		Slave is in an unrecoverable error state and
+ *				must be reset
+ *
+ * These values are present in the ERR portion of an RD or TACK slave response
+ * word.  These values can also be found in the ERR_CODE register of the
+ * protocol function.
+ */
+enum bif_slave_error_code {
+	BIF_ERR_NONE		= 0x00,
+	BIF_ERR_GENERAL		= 0x10,
+	BIF_ERR_PARITY		= 0x11,
+	BIF_ERR_INVERSION	= 0x12,
+	BIF_ERR_BAD_LENGTH	= 0x13,
+	BIF_ERR_TIMING		= 0x14,
+	BIF_ERR_UNKNOWN_CMD	= 0x15,
+	BIF_ERR_CMD_SEQ		= 0x16,
+	BIF_ERR_BUS_COLLISION	= 0x1F,
+	BIF_ERR_SLAVE_BUSY	= 0x20,
+	BIF_ERR_FATAL		= 0x7F,
+};
+
+/**
+ * struct bif_protocol_function - constant data present in protocol function
+ * @l2_entry:		Pointer to protocol function L2 DDB data struct
+ * @protocol_pointer:	BIF slave address where protocol registers begin
+ * @device_id_pointer:	BIF slave address where device ID begins
+ * @device_id:		The 8-byte unique device ID in MSB to LSB order
+ */
+struct bif_protocol_function {
+	struct bif_ddb_l2_data *l2_entry;
+	u16	protocol_pointer;
+	u16	device_id_pointer;
+	u8	device_id[BIF_DEVICE_ID_BYTE_LENGTH]; /* Unique ID */
+};
+
+#define PROTOCOL_FUNC_DEV_ADR_ADDR(protocol_pointer)	((protocol_pointer) + 0)
+#define PROTOCOL_FUNC_ERR_CODE_ADDR(protocol_pointer)	((protocol_pointer) + 2)
+#define PROTOCOL_FUNC_ERR_CNT_ADDR(protocol_pointer)	((protocol_pointer) + 3)
+#define PROTOCOL_FUNC_WORD_CNT_ADDR(protocol_pointer)	((protocol_pointer) + 4)
+
+/**
+ * struct bif_slave_control_function - constant data present in slave control
+ *			function as well as internal software state parameters
+ * @l2_entry:		Pointer to slave control function L2 DDB data struct
+ * @slave_ctrl_pointer:	BIF slave address where slave control registers begin
+ * @task_count:		Number of tasks supported by the slave
+ * @irq_notifier_list:	List of notifiers for consumer drivers that wish to be
+ *			notified when any given interrupt triggers.  This list
+ *			is dynamically allocated with length task_count.
+ */
+struct bif_slave_control_function {
+	struct bif_ddb_l2_data		*l2_entry;
+	u16				slave_ctrl_pointer;
+	unsigned int			task_count;
+	struct blocking_notifier_head	*irq_notifier_list;
+};
+
+#define SLAVE_CTRL_TASKS_PER_SET	8
+
+/**
+ * bif_slave_control_task_is_valid() - returns true if the specified task
+ *		is supported by the slave or false if it isn't
+ * @func:	Pointer to slave's slave control function structure
+ * @task:	Slave task number to check
+ */
+static inline bool
+bif_slave_control_task_is_valid(struct bif_slave_control_function *func,
+				unsigned int task)
+{
+	return func ? task < func->task_count : false;
+}
+
+#define SLAVE_CTRL_FUNC_IRQ_EN_ADDR(slave_ctrl_pointer, task) \
+	((slave_ctrl_pointer) + 4 * ((task) / SLAVE_CTRL_TASKS_PER_SET) + 0)
+
+#define SLAVE_CTRL_FUNC_IRQ_STATUS_ADDR(slave_ctrl_pointer, task) \
+	((slave_ctrl_pointer) + 4 * ((task) / SLAVE_CTRL_TASKS_PER_SET) + 1)
+#define SLAVE_CTRL_FUNC_IRQ_CLEAR_ADDR(slave_ctrl_pointer, task) \
+	SLAVE_CTRL_FUNC_IRQ_STATUS_ADDR(slave_ctrl_pointer, task)
+
+#define SLAVE_CTRL_FUNC_TASK_TRIGGER_ADDR(slave_ctrl_pointer, task) \
+	((slave_ctrl_pointer) + 4 * ((task) / SLAVE_CTRL_TASKS_PER_SET) + 2)
+#define SLAVE_CTRL_FUNC_TASK_BUSY_ADDR(slave_ctrl_pointer, task) \
+	SLAVE_CTRL_FUNC_TASK_TRIGGER_ADDR(slave_ctrl_pointer, task)
+
+#define SLAVE_CTRL_FUNC_TASK_AUTO_TRIGGER_ADDR(slave_ctrl_pointer, task) \
+	((slave_ctrl_pointer) + 4 * ((task) / SLAVE_CTRL_TASKS_PER_SET) + 3)
+
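
Worked example (editorial note, not part of the patch): with slave_ctrl_pointer = 0x0400 and task = 10, the task falls in register set 10 / SLAVE_CTRL_TASKS_PER_SET = 1, so the macros above resolve to:

	SLAVE_CTRL_FUNC_IRQ_EN_ADDR(0x0400, 10)       = 0x0400 + 4 * 1 + 0 = 0x0404
	SLAVE_CTRL_FUNC_IRQ_STATUS_ADDR(0x0400, 10)   = 0x0400 + 4 * 1 + 1 = 0x0405
	SLAVE_CTRL_FUNC_TASK_TRIGGER_ADDR(0x0400, 10) = 0x0400 + 4 * 1 + 2 = 0x0406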
+/**
+ * struct bif_temperature_function - constant data present in temperature
+ *				sensor function
+ * @temperature_pointer:	BIF slave address where temperature sensor
+ *				control registers begin
+ * @slave_control_channel:	Slave control channel associated with the
+ *				temperature sensor function.  This channel is
+ *				also the task number.
+ * @accuracy_pointer:		BIF slave address where temperature accuracy
+ *				registers begin
+ */
+struct bif_temperature_function {
+	u16	temperature_pointer;
+	u8	slave_control_channel;
+	u16	accuracy_pointer;
+};
+
+/**
+ * enum bif_mipi_object_type - MIPI defined BIF object types
+ * %BIF_OBJ_END_OF_LIST:	Indicates that the end of the object list in
+ *				NVM has been reached
+ * %BIF_OBJ_SEC_SLAVE:		Specifies the UIDs of secondary slaves found
+ *				inside of the battery pack
+ * %BIF_OBJ_BATT_PARAM:		Specifies some variety of battery parameter.
+ *				There is no MIPI defined format for this object
+ *				type so parsing is manufacturer specific.
+ */
+enum bif_mipi_object_type {
+	BIF_OBJ_END_OF_LIST	= 0x00,
+	BIF_OBJ_SEC_SLAVE	= 0x01,
+	BIF_OBJ_BATT_PARAM	= 0x02,
+};
+
+/**
+ * struct bif_object - contains all header and data information for a slave
+ *			data object
+ * @type:		Object type
+ * @version:		Object version
+ * @manufacturer_id:	Manufacturer ID number allocated by MIPI
+ * @length:		Length of the entire object including header and CRC
+ * @data:		Raw byte data found in the object
+ * @crc:		CRC of the object calculated using CRC-CCITT
+ * @list:		Linked-list connection parameter
+ * @addr:		BIF slave address corresponding to the start of the object
+ *
+ * manufacturer_id == 0x0000 if MIPI type and version.
+ */
+struct bif_object {
+	u8			type;
+	u8			version;
+	u16			manufacturer_id;
+	u16			length;
+	u8			*data;
+	u16			crc;
+	struct list_head	list;
+	u16			addr;
+};
+
+/**
+ * struct bif_nvm_function - constant data present in non-volatile memory
+ *				function as well internal software state
+ *				function as well as internal software state
+ * @nvm_pointer:		BIF slave address where NVM registers begin
+ * @slave_control_channel:	Slave control channel associated with the
+ *				NVM function.  This channel is also the task
+ *				number.
+ * @write_buffer_size:		Size in bytes of the NVM write buffer.  0x00
+ *				is used to denote a 256 byte buffer.
+ * @nvm_base_address:		BIF slave address where NVM begins
+ * @nvm_size:			NVM size in bytes
+ * @object_count:		Number of BIF objects read from NVM
+ * @object_list:		List of BIF objects read from NVM
+ */
+struct bif_nvm_function {
+	u16			nvm_pointer;
+	u8			slave_control_channel;
+	u8			write_buffer_size;
+	u16			nvm_base_address;
+	u16			nvm_size;
+	int			object_count;
+	struct list_head	object_list;
+};
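
A minimal consumer sketch (assumptions: "nvm" points at a populated struct bif_nvm_function handed out by the BIF core; the helper name and log text are illustrative) showing how the parsed object list can be walked:

	static void bif_log_batt_params(struct bif_nvm_function *nvm)
	{
		struct bif_object *obj;

		list_for_each_entry(obj, &nvm->object_list, list) {
			if (obj->type == BIF_OBJ_BATT_PARAM)
				pr_info("battery param object at 0x%04x, %u bytes\n",
					obj->addr, obj->length);
		}
	}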
+
+/**
+ * struct bif_ctrl - Opaque handle for a BIF controller to be used in bus
+ *			oriented BIF function calls.
+ */
+struct bif_ctrl;
+
+/**
+ * struct bif_slave - Opaque handle for a BIF slave to be used in slave oriented
+ *			BIF function calls.
+ */
+struct bif_slave;
+
+/**
+ * enum bif_bus_state - indicates the current or desired state of the BIF bus
+ * %BIF_BUS_STATE_MASTER_DISABLED:	BIF host hardware is disabled
+ * %BIF_BUS_STATE_POWER_DOWN:		BIF bus is in power down state and
+ *					BCL is not being pulled high
+ * %BIF_BUS_STATE_STANDBY:		BIF slaves are in standby state in which
+ *					less power is drawn
+ * %BIF_BUS_STATE_ACTIVE:		BIF slaves are ready for immediate
+ *					communications
+ * %BIF_BUS_STATE_INTERRUPT:		BIF bus is active, but no communication
+ *					is possible.  Instead, either one of the
+ *					slaves or the master must transition to
+ *					active state by pulling BCL low for 1
+ *					tau bif period.
+ */
+enum bif_bus_state {
+	BIF_BUS_STATE_MASTER_DISABLED,
+	BIF_BUS_STATE_POWER_DOWN,
+	BIF_BUS_STATE_STANDBY,
+	BIF_BUS_STATE_ACTIVE,
+	BIF_BUS_STATE_INTERRUPT,
+};
+
+/**
+ * enum bif_bus_event - events that the BIF framework may send to BIF consumers
+ * %BIF_BUS_EVENT_BATTERY_INSERTED:	Indicates that a battery was just
+ *					inserted physically or that the BIF
+ *					host controller for the battery just
+ *					probed and a battery was already
+ *					present.
+ * %BIF_BUS_EVENT_BATTERY_REMOVED:	Indicates that a battery was just
+ *					removed and thus its slaves are no
+ *					longer accessible.
+ */
+enum bif_bus_event {
+	BIF_BUS_EVENT_BATTERY_INSERTED,
+	BIF_BUS_EVENT_BATTERY_REMOVED,
+};
+
+/* Mask values to be ORed together for use in bif_match_criteria.match_mask. */
+#define BIF_MATCH_MANUFACTURER_ID	BIT(0)
+#define BIF_MATCH_PRODUCT_ID		BIT(1)
+#define BIF_MATCH_FUNCTION_TYPE		BIT(2)
+#define BIF_MATCH_FUNCTION_VERSION	BIT(3)
+#define BIF_MATCH_IGNORE_PRESENCE	BIT(4)
+
+/**
+ * struct bif_match_criteria - specifies the matching criteria that a BIF
+ *			consumer uses to find an appropriate BIF slave
+ * @match_mask:		Mask value specifying which parameters to match upon.
+ *			This value should be some ORed combination of
+ *			BIF_MATCH_* specified above.
+ * @manufacturer_id:	Manufacturer ID number allocated by MIPI
+ * @product_id:		Manufacturer specified product ID number
+ * @function_type:	Defines the type of the function.  The type may be
+ *			either MIPI or manufacturer defined.
+ * @function_version:	Defines the version of the function.  The version may
+ *			be either MIPI or manufacturer defined.
+ * @ignore_presence:	If true, then slaves that are currently not present
+ *			will be successfully matched against.  By default, only
+ *			present slaves can be matched.
+ */
+struct bif_match_criteria {
+	u32	match_mask;
+	u16	manufacturer_id;
+	u16	product_id;
+	u8	function_type;
+	u8	function_version;
+	bool	ignore_presence;
+};
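
A hedged consumer sketch of the matching flow: set match_mask to the fields being matched, then query the framework. The assumption that id 0 returns the first matching slave is illustrative rather than guaranteed by this header.

	static struct bif_slave *bif_find_temp_slave(struct bif_ctrl *ctrl)
	{
		struct bif_match_criteria criteria = {
			.match_mask	= BIF_MATCH_FUNCTION_TYPE,
			.function_type	= BIF_FUNC_TEMPERATURE,
		};

		if (bif_slave_match_count(ctrl, &criteria) < 1)
			return NULL;

		return bif_slave_match_get(ctrl, 0, &criteria);
	}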
+
+/**
+ * bif_battery_rid_ranges - MIPI-BIF defined Rid battery pack resistance ranges
+ * %BIF_BATT_RID_SPECIAL1_MIN:	Minimum Rid for special case 1
+ * %BIF_BATT_RID_SPECIAL1_MAX:	Maximum Rid for special case 1
+ * %BIF_BATT_RID_SPECIAL2_MIN:	Minimum Rid for special case 2
+ * %BIF_BATT_RID_SPECIAL2_MAX:	Maximum Rid for special case 2
+ * %BIF_BATT_RID_SPECIAL3_MIN:	Minimum Rid for special case 3
+ * %BIF_BATT_RID_SPECIAL3_MAX:	Maximum Rid for special case 3
+ * %BIF_BATT_RID_LOW_COST_MIN:	Minimum Rid for a low cost battery pack
+ * %BIF_BATT_RID_LOW_COST_MAX:	Maximum Rid for a low cost battery pack
+ * %BIF_BATT_RID_SMART_MIN:	Minimum Rid for a smart battery pack
+ * %BIF_BATT_RID_SMART_MAX:	Maximum Rid for a smart battery pack
+ */
+enum bif_battery_rid_ranges {
+	BIF_BATT_RID_SPECIAL1_MIN	= 0,
+	BIF_BATT_RID_SPECIAL1_MAX	= 1,
+	BIF_BATT_RID_SPECIAL2_MIN	= 7350,
+	BIF_BATT_RID_SPECIAL2_MAX	= 7650,
+	BIF_BATT_RID_SPECIAL3_MIN	= 12740,
+	BIF_BATT_RID_SPECIAL3_MAX	= 13260,
+	BIF_BATT_RID_LOW_COST_MIN	= 19600,
+	BIF_BATT_RID_LOW_COST_MAX	= 140000,
+	BIF_BATT_RID_SMART_MIN		= 240000,
+	BIF_BATT_RID_SMART_MAX		= 450000,
+};
+
+#ifdef CONFIG_BIF
+
+int bif_request_irq(struct bif_slave *slave, unsigned int task,
+			struct notifier_block *nb);
+int bif_free_irq(struct bif_slave *slave, unsigned int task,
+			struct notifier_block *nb);
+
+int bif_trigger_task(struct bif_slave *slave, unsigned int task);
+int bif_task_is_busy(struct bif_slave *slave, unsigned int task);
+
+int bif_ctrl_count(void);
+struct bif_ctrl *bif_ctrl_get_by_id(unsigned int id);
+struct bif_ctrl *bif_ctrl_get(struct device *consumer_dev);
+void bif_ctrl_put(struct bif_ctrl *ctrl);
+
+int bif_ctrl_signal_battery_changed(struct bif_ctrl *ctrl);
+
+int bif_slave_match_count(const struct bif_ctrl *ctrl,
+			const struct bif_match_criteria *match_criteria);
+
+struct bif_slave *bif_slave_match_get(const struct bif_ctrl *ctrl,
+	unsigned int id, const struct bif_match_criteria *match_criteria);
+
+void bif_slave_put(struct bif_slave *slave);
+
+int bif_ctrl_notifier_register(struct bif_ctrl *ctrl,
+				struct notifier_block *nb);
+
+int bif_ctrl_notifier_unregister(struct bif_ctrl *ctrl,
+				struct notifier_block *nb);
+
+struct bif_ctrl *bif_get_ctrl_handle(struct bif_slave *slave);
+
+int bif_slave_find_function(struct bif_slave *slave, u8 function, u8 *version,
+				u16 *function_pointer);
+
+int bif_slave_read(struct bif_slave *slave, u16 addr, u8 *buf, int len);
+int bif_slave_write(struct bif_slave *slave, u16 addr, u8 *buf, int len);
+
+int bif_slave_is_present(struct bif_slave *slave);
+
+int bif_slave_is_selected(struct bif_slave *slave);
+int bif_slave_select(struct bif_slave *slave);
+
+int bif_ctrl_raw_transaction(struct bif_ctrl *ctrl, int transaction, u8 data);
+int bif_ctrl_raw_transaction_read(struct bif_ctrl *ctrl, int transaction,
+					u8 data, int *response);
+int bif_ctrl_raw_transaction_query(struct bif_ctrl *ctrl, int transaction,
+		u8 data, bool *query_response);
+
+void bif_ctrl_bus_lock(struct bif_ctrl *ctrl);
+void bif_ctrl_bus_unlock(struct bif_ctrl *ctrl);
+
+u16 bif_crc_ccitt(const u8 *buffer, unsigned int len);
+
+int bif_ctrl_measure_rid(struct bif_ctrl *ctrl);
+int bif_ctrl_get_bus_period(struct bif_ctrl *ctrl);
+int bif_ctrl_set_bus_period(struct bif_ctrl *ctrl, int period_ns);
+int bif_ctrl_get_bus_state(struct bif_ctrl *ctrl);
+int bif_ctrl_set_bus_state(struct bif_ctrl *ctrl, enum bif_bus_state state);
+
+#else
+
+static inline int bif_request_irq(struct bif_slave *slave, unsigned int task,
+			struct notifier_block *nb) { return -EPERM; }
+static inline int bif_free_irq(struct bif_slave *slave, unsigned int task,
+			struct notifier_block *nb) { return -EPERM; }
+
+static inline int bif_trigger_task(struct bif_slave *slave, unsigned int task)
+{ return -EPERM; }
+static inline int bif_task_is_busy(struct bif_slave *slave, unsigned int task)
+{ return -EPERM; }
+
+static inline int bif_ctrl_count(void) { return -EPERM; }
+static inline struct bif_ctrl *bif_ctrl_get_by_id(unsigned int id)
+{ return ERR_PTR(-EPERM); }
+static inline struct bif_ctrl *bif_ctrl_get(struct device *consumer_dev)
+{ return ERR_PTR(-EPERM); }
+static inline void bif_ctrl_put(struct bif_ctrl *ctrl) { return; }
+
+static inline int bif_ctrl_signal_battery_changed(struct bif_ctrl *ctrl)
+{ return -EPERM; }
+
+static inline int bif_slave_match_count(const struct bif_ctrl *ctrl,
+			const struct bif_match_criteria *match_criteria)
+{ return -EPERM; }
+
+static inline struct bif_slave *bif_slave_match_get(const struct bif_ctrl *ctrl,
+	unsigned int id, const struct bif_match_criteria *match_criteria)
+{ return ERR_PTR(-EPERM); }
+
+static inline void bif_slave_put(struct bif_slave *slave) { return; }
+
+static inline int bif_ctrl_notifier_register(struct bif_ctrl *ctrl,
+				struct notifier_block *nb)
+{ return -EPERM; }
+
+static inline int bif_ctrl_notifier_unregister(struct bif_ctrl *ctrl,
+				struct notifier_block *nb)
+{ return -EPERM; }
+
+static inline struct bif_ctrl *bif_get_ctrl_handle(struct bif_slave *slave)
+{ return ERR_PTR(-EPERM); }
+
+static inline int bif_slave_find_function(struct bif_slave *slave, u8 function,
+				u8 *version, u16 *function_pointer)
+{ return -EPERM; }
+
+static inline int bif_slave_read(struct bif_slave *slave, u16 addr, u8 *buf,
+				int len)
+{ return -EPERM; }
+static inline int bif_slave_write(struct bif_slave *slave, u16 addr, u8 *buf,
+				int len)
+{ return -EPERM; }
+
+static inline int bif_slave_is_present(struct bif_slave *slave)
+{ return -EPERM; }
+
+static inline int bif_slave_is_selected(struct bif_slave *slave)
+{ return -EPERM; }
+static inline int bif_slave_select(struct bif_slave *slave)
+{ return -EPERM; }
+
+static inline int bif_ctrl_raw_transaction(struct bif_ctrl *ctrl,
+					int transaction, u8 data)
+{ return -EPERM; }
+static inline int bif_ctrl_raw_transaction_read(struct bif_ctrl *ctrl,
+					int transaction, u8 data, int *response)
+{ return -EPERM; }
+static inline int bif_ctrl_raw_transaction_query(struct bif_ctrl *ctrl,
+					int transaction, u8 data,
+					bool *query_response)
+{ return -EPERM; }
+
+static inline void bif_ctrl_bus_lock(struct bif_ctrl *ctrl)
+{ return; }
+static inline void bif_ctrl_bus_unlock(struct bif_ctrl *ctrl)
+{ return; }
+
+static inline u16 bif_crc_ccitt(const u8 *buffer, unsigned int len)
+{ return 0; }
+
+static inline int bif_ctrl_measure_rid(struct bif_ctrl *ctrl) { return -EPERM; }
+static inline int bif_ctrl_get_bus_period(struct bif_ctrl *ctrl)
+{ return -EPERM; }
+static inline int bif_ctrl_set_bus_period(struct bif_ctrl *ctrl, int period_ns)
+{ return -EPERM; }
+static inline int bif_ctrl_get_bus_state(struct bif_ctrl *ctrl)
+{ return -EPERM; }
+static inline int bif_ctrl_set_bus_state(struct bif_ctrl *ctrl,
+					enum bif_bus_state state)
+{ return -EPERM; }
+
+#endif
+
+#endif
diff --git a/include/linux/bif/driver.h b/include/linux/bif/driver.h
new file mode 100644
index 0000000..184d46f
--- /dev/null
+++ b/include/linux/bif/driver.h
@@ -0,0 +1,161 @@
+/* Copyright (c) 2013, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _LINUX_BIF_DRIVER_H_
+#define _LINUX_BIF_DRIVER_H_
+
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/types.h>
+#include <linux/bif/consumer.h>
+
+/**
+ * struct bif_ctrl_dev - opaque handle used to identify a given BIF controller
+ *			device
+ */
+struct bif_ctrl_dev;
+
+/**
+ * struct bif_ctrl_ops - BIF operations which may be implemented by BIF
+ *				controller drivers
+ * @bus_transaction:		Perform the specified BIF transaction which does
+ *				not result in any slave response.
+ * @bus_transaction_query:	Perform the specified BIF transaction which
+ *				expects a BQ response in the case of slave
+ *				positive acknowledgement.
+ * @bus_transaction_read:	Perform the specified BIF transaction which
+ *				expects an RD or TACK response from the selected
+ *				slave.
+ * @read_slave_registers:	Perform all BIF transactions necessary to read
+ *				the specified set of contiguous registers from
+ *				the previously selected slave.  This operation
+ *				is used to optimize the common case of slave
+ *				register reads since a BIF controller driver
+ *				can take advantage of BIF burst reads while the
+ *				BIF core driver cannot due to the inherent
+ *				tight timing requirements.
+ * @write_slave_registers:	Perform all BIF transactions necessary to write
+ *				the specified set of contiguous registers to
+ *				the previously selected slave.  This operation
+ *				is used to optimize the common case of slave
+ *				register writes since a BIF controller
+ *				driver can remove redundant steps when
+ *				performing several WD commands in a row.
+ * @get_bus_period:		Return the tau_bif BIF bus clock period in
+ *				nanoseconds.
+ * @set_bus_period:		Set the tau_bif BIF bus clock period in
+ *				nanoseconds.  If the exact period is not
+ *				supported by the BIF controller hardware, then
+ *				the next larger supported period should be used.
+ * @get_battery_presence:	Return the current state of the battery pack.
+ *				If a battery pack is present, then return >= 1.
+ *				If a battery pack is not present, then return 0.
+ *				If an error occurs during presence detection,
+ *				then return a negative errno value.
+ * @get_battery_rid:		Return the measured value of the Rid battery
+ *				pack pull-down resistor in ohms.
+ * @get_bus_state:		Return the current bus state as defined by one
+ *				of the enum bif_bus_state values.
+ * @set_bus_state:		Set the BIF bus state to the specified enum
+ *				bif_bus_state value.
+ *
+ * The following operations must be defined by every BIF controller driver in
+ * order to ensure baseline functionality:
+ * bus_transaction, bus_transaction_query, get_bus_state, and set_bus_state.
+ *
+ * The BIF core driver is unaware of BIF transaction timing constraints.  A
+ * given BIF controller driver must ensure that all timing constraints in the
+ * MIPI-BIF specification are met as transactions are carried out.
+ *
+ * Conversion between 11-bit and 17-bit BIF words (i.e. the insertion of BCF_n,
+ * parity bits, and the inversion bit) must be handled inside of the BIF
+ * controller driver (either in software or hardware).  This guarantees maximum
+ * performance if hardware support is available.
+ *
+ * The bus_transaction_read operation must return -ETIMEDOUT in the case of no
+ * RD or TACK word received.  This allows the transaction query, TQ, command
+ * to be used for slave selection verification.
+ *
+ * It is acceptable for the BIF bus state to be changed autonomously by a BIF
+ * controller driver in response to low level bus actions without a call to
+ * set_bus_state.  One example is the case of receiving a slave interrupt
+ * while in interrupt state as this intrinsically causes the bus to enter the
+ * active communication state.
+ */
+struct bif_ctrl_ops {
+	int (*bus_transaction) (struct bif_ctrl_dev *bdev, int transaction,
+					u8 data);
+	int (*bus_transaction_query) (struct bif_ctrl_dev *bdev,
+					int transaction, u8 data,
+					bool *query_response);
+	int (*bus_transaction_read) (struct bif_ctrl_dev *bdev,
+					int transaction, u8 data,
+					int *response);
+	int (*read_slave_registers) (struct bif_ctrl_dev *bdev, u16 addr,
+					u8 *data, int len);
+	int (*write_slave_registers) (struct bif_ctrl_dev *bdev, u16 addr,
+					const u8 *data, int len);
+	int (*get_bus_period) (struct bif_ctrl_dev *bdev);
+	int (*set_bus_period) (struct bif_ctrl_dev *bdev, int period_ns);
+	int (*get_battery_presence) (struct bif_ctrl_dev *bdev);
+	int (*get_battery_rid) (struct bif_ctrl_dev *bdev);
+	int (*get_bus_state) (struct bif_ctrl_dev *bdev);
+	int (*set_bus_state) (struct bif_ctrl_dev *bdev, int state);
+};
+
+/**
+ * struct bif_ctrl_desc - BIF bus controller descriptor
+ * @name:		Name used to identify the BIF controller
+ * @ops:		BIF operations supported by the BIF controller
+ * @bus_clock_min_ns:	Minimum tau_bif BIF bus clock period supported by the
+ *			BIF controller
+ * @bus_clock_max_ns:	Maximum tau_bif BIF bus clock period supported by the
+ *			BIF controller
+ *
+ * Each BIF controller registered with the BIF core is described with a
+ * structure of this type.
+ */
+struct bif_ctrl_desc {
+	const char *name;
+	struct bif_ctrl_ops *ops;
+	int bus_clock_min_ns;
+	int bus_clock_max_ns;
+};
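
A hedged controller-driver sketch tying the descriptor to bif_ctrl_register(); the foo_* callbacks, the probe function, and the clock limits are hypothetical placeholders, and only the ops listed above as mandatory are wired up:

	static struct bif_ctrl_ops foo_bif_ops = {
		.bus_transaction	= foo_bus_transaction,
		.bus_transaction_query	= foo_bus_transaction_query,
		.get_bus_state		= foo_get_bus_state,
		.set_bus_state		= foo_set_bus_state,
	};

	static struct bif_ctrl_desc foo_bif_desc = {
		.name			= "foo-bif",
		.ops			= &foo_bif_ops,
		.bus_clock_min_ns	= 2000,		/* placeholder */
		.bus_clock_max_ns	= 150000,	/* placeholder */
	};

	static int foo_bif_probe(struct platform_device *pdev)
	{
		struct bif_ctrl_dev *bdev;

		bdev = bif_ctrl_register(&foo_bif_desc, &pdev->dev, NULL,
					 pdev->dev.of_node);
		if (IS_ERR(bdev))
			return PTR_ERR(bdev);

		platform_set_drvdata(pdev, bdev);
		return 0;
	}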
+
+#ifdef CONFIG_BIF
+
+struct bif_ctrl_dev *bif_ctrl_register(struct bif_ctrl_desc *bif_desc,
+	struct device *dev, void *driver_data, struct device_node *of_node);
+
+void bif_ctrl_unregister(struct bif_ctrl_dev *bdev);
+
+void *bdev_get_drvdata(struct bif_ctrl_dev *bdev);
+
+int bif_ctrl_notify_battery_changed(struct bif_ctrl_dev *bdev);
+int bif_ctrl_notify_slave_irq(struct bif_ctrl_dev *bdev);
+
+#else
+
+static inline struct bif_ctrl_dev *bif_ctrl_register(
+	struct bif_ctrl_desc *bif_desc, struct device *dev, void *driver_data,
+	struct device_node *of_node)
+{ return ERR_PTR(-EINVAL); }
+
+static inline void bif_ctrl_unregister(struct bif_ctrl_dev *bdev) { }
+
+static inline void *bdev_get_drvdata(struct bif_ctrl_dev *bdev) { return NULL; }
+
+static inline int bif_ctrl_notify_slave_irq(struct bif_ctrl_dev *bdev)
+{ return -EINVAL; }
+
+#endif
+
+#endif
diff --git a/include/linux/coresight-cti.h b/include/linux/coresight-cti.h
new file mode 100644
index 0000000..7f2da3f
--- /dev/null
+++ b/include/linux/coresight-cti.h
@@ -0,0 +1,59 @@
+/* Copyright (c) 2013, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _LINUX_CORESIGHT_CTI_H
+#define _LINUX_CORESIGHT_CTI_H
+
+struct coresight_cti_data {
+	int nr_ctis;
+	const char **names;
+};
+
+struct coresight_cti {
+	const char *name;
+	struct list_head link;
+};
+
+#ifdef CONFIG_CORESIGHT_CTI
+extern struct coresight_cti *coresight_cti_get(const char *name);
+extern void coresight_cti_put(struct coresight_cti *cti);
+extern int coresight_cti_map_trigin(
+			struct coresight_cti *cti, int trig, int ch);
+extern int coresight_cti_map_trigout(
+			struct coresight_cti *cti, int trig, int ch);
+extern void coresight_cti_unmap_trigin(
+			struct coresight_cti *cti, int trig, int ch);
+extern void coresight_cti_unmap_trigout(
+			struct coresight_cti *cti, int trig, int ch);
+#else
+static inline struct coresight_cti *coresight_cti_get(const char *name)
+{
+	return NULL;
+}
+static inline void coresight_cti_put(struct coresight_cti *cti) {}
+static inline int coresight_cti_map_trigin(
+			struct coresight_cti *cti, int trig, int ch)
+{
+	return -ENOSYS;
+}
+static inline int coresight_cti_map_trigout(
+			struct coresight_cti *cti, int trig, int ch)
+{
+	return -ENOSYS;
+}
+static inline void coresight_cti_unmap_trigin(
+			struct coresight_cti *cti, int trig, int ch) {}
+static inline void coresight_cti_unmap_trigout(
+			struct coresight_cti *cti, int trig, int ch) {}
+#endif
+
+#endif
diff --git a/include/linux/cpuidle.h b/include/linux/cpuidle.h
index 6c26a3d..5ab7183 100644
--- a/include/linux/cpuidle.h
+++ b/include/linux/cpuidle.h
@@ -57,6 +57,7 @@
 
 /* Idle State Flags */
 #define CPUIDLE_FLAG_TIME_VALID	(0x01) /* is residency time measurable? */
+#define CPUIDLE_FLAG_COUPLED	(0x02) /* state applies to multiple cpus */
 
 #define CPUIDLE_DRIVER_FLAGS_MASK (0xFFFF0000)
 
@@ -100,6 +101,12 @@
 	struct list_head 	device_list;
 	struct kobject		kobj;
 	struct completion	kobj_unregister;
+
+#ifdef CONFIG_ARCH_NEEDS_CPU_IDLE_COUPLED
+	int			safe_state_index;
+	cpumask_t		coupled_cpus;
+	struct cpuidle_coupled	*coupled;
+#endif
 };
 
 DECLARE_PER_CPU(struct cpuidle_device *, cpuidle_devices);
@@ -176,6 +183,10 @@
 
 #endif
 
+#ifdef CONFIG_ARCH_NEEDS_CPU_IDLE_COUPLED
+void cpuidle_coupled_parallel_barrier(struct cpuidle_device *dev, atomic_t *a);
+#endif
+
 /******************************
  * CPUIDLE GOVERNOR INTERFACE *
  ******************************/
diff --git a/include/linux/diagchar.h b/include/linux/diagchar.h
index d3ee879..7a5ab0d 100644
--- a/include/linux/diagchar.h
+++ b/include/linux/diagchar.h
@@ -112,10 +112,10 @@
 /* This needs to be modified manually now, when we add
  a new RANGE of SSIDs to the msg_mask_tbl */
 #define MSG_MASK_TBL_CNT		24
-#define EVENT_LAST_ID			0x099F
+#define EVENT_LAST_ID			0x09AB
 
 #define MSG_SSID_0			0
-#define MSG_SSID_0_LAST			93
+#define MSG_SSID_0_LAST			94
 #define MSG_SSID_1			500
 #define MSG_SSID_1_LAST			506
 #define MSG_SSID_2			1000
@@ -278,6 +278,9 @@
 	MSG_LVL_LOW,
 	MSG_LVL_MED,
 	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_HIGH,
 	MSG_LVL_LOW
 };
 
@@ -713,7 +716,7 @@
 /* LOG CODES */
 
 #define LOG_0	0x0
-#define LOG_1	0x1750
+#define LOG_1	0x1755
 #define LOG_2	0x0
 #define LOG_3	0x0
 #define LOG_4	0x4910
diff --git a/include/linux/dma-contiguous.h b/include/linux/dma-contiguous.h
index 2f303e4..8a1b3a1 100644
--- a/include/linux/dma-contiguous.h
+++ b/include/linux/dma-contiguous.h
@@ -65,11 +65,37 @@
  */
 #define MAX_CMA_AREAS	(1 + CONFIG_CMA_AREAS)
 
-extern struct cma *dma_contiguous_default_area;
+extern struct cma *dma_contiguous_def_area;
 
 void dma_contiguous_reserve(phys_addr_t addr_limit);
-int dma_declare_contiguous(struct device *dev, unsigned long size,
-			   phys_addr_t base, phys_addr_t limit);
+
+int dma_contiguous_reserve_area(phys_addr_t size, phys_addr_t *res_base,
+				  phys_addr_t limit, const char *name);
+
+int dma_contiguous_add_device(struct device *dev, phys_addr_t base);
+
+/**
+ * dma_declare_contiguous() - reserve area for contiguous memory handling
+ *			      for particular device
+ * @dev:   Pointer to device structure.
+ * @size:  Size of the reserved memory.
+ * @base:  Start address of the reserved memory (optional, 0 for any).
+ * @limit: End address of the reserved memory (optional, 0 for any).
+ *
+ * This function reserves memory for the specified device. It should be
+ * called by board-specific code while the early allocator (memblock or
+ * bootmem) is still active.
+ */
+
+static inline int dma_declare_contiguous(struct device *dev, phys_addr_t size,
+					 phys_addr_t base, phys_addr_t limit)
+{
+	int ret;
+	ret = dma_contiguous_reserve_area(size, &base, limit, NULL);
+	if (ret == 0)
+		ret = dma_contiguous_add_device(dev, base);
+	return ret;
+}
 
 struct page *dma_alloc_from_contiguous(struct device *dev, int count,
 				       unsigned int order);
@@ -83,7 +109,7 @@
 static inline void dma_contiguous_reserve(phys_addr_t limit) { }
 
 static inline
-int dma_declare_contiguous(struct device *dev, unsigned long size,
+int dma_declare_contiguous(struct device *dev, phys_addr_t size,
 			   phys_addr_t base, phys_addr_t limit)
 {
 	return -ENOSYS;
diff --git a/include/linux/i2c/atmel_mxt_ts.h b/include/linux/i2c/atmel_mxt_ts.h
index b96ba84..b903dfb 100644
--- a/include/linux/i2c/atmel_mxt_ts.h
+++ b/include/linux/i2c/atmel_mxt_ts.h
@@ -3,7 +3,7 @@
  *
  * Copyright (C) 2010 Samsung Electronics Co.Ltd
  * Author: Joonyoung Shim <jy0922.shim@samsung.com>
- * Copyright (c) 2011-2012, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2011-2013, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute  it and/or modify it
  * under  the terms of  the GNU General  Public License as published by the
@@ -32,8 +32,10 @@
 /* Bootoader IDs */
 #define MXT_BOOTLOADER_ID_224		0x0A
 #define MXT_BOOTLOADER_ID_224E		0x06
+#define MXT_BOOTLOADER_ID_336S		0x1A
 #define MXT_BOOTLOADER_ID_1386		0x01
 #define MXT_BOOTLOADER_ID_1386E		0x10
+#define MXT_BOOTLOADER_ID_1664S		0x14
 
 /* Config data for a given maXTouch controller with a specific firmware */
 struct mxt_config_info {
@@ -75,6 +77,7 @@
 	int *key_codes;
 	bool need_calibration;
 	bool no_force_update;
+	u8 bl_addr;
 
 	u8(*read_chg) (void);
 	int (*init_hw) (bool);
diff --git a/include/linux/input/synaptics_dsx.h b/include/linux/input/synaptics_dsx.h
index b779e42..9d03787 100644
--- a/include/linux/input/synaptics_dsx.h
+++ b/include/linux/input/synaptics_dsx.h
@@ -5,6 +5,7 @@
  *
  * Copyright (C) 2012 Alexandra Chin <alexandra.chin@tw.synaptics.com>
  * Copyright (C) 2012 Scott Lin <scott.lin@tw.synaptics.com>
+ * Copyright (c) 2013, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
@@ -47,6 +48,7 @@
 	bool x_flip;
 	bool y_flip;
 	bool regulator_en;
+	bool i2c_pull_up;
 	unsigned irq_gpio;
 	unsigned long irq_flags;
 	unsigned reset_gpio;
diff --git a/include/linux/ion.h b/include/linux/ion.h
index f27782f..f159fe2 100644
--- a/include/linux/ion.h
+++ b/include/linux/ion.h
@@ -2,7 +2,7 @@
  * include/linux/ion.h
  *
  * Copyright (C) 2011 Google, Inc.
- * Copyright (c) 2011-2012, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2011-2013, The Linux Foundation. All rights reserved.
  *
  * This software is licensed under the terms of the GNU General Public
  * License version 2, as published by the Free Software Foundation, and
@@ -482,6 +482,10 @@
 	return -ENODEV;
 }
 
+static inline void ion_mark_dangling_buffers_locked(struct ion_device *dev)
+{
+}
+
 static inline int msm_ion_do_cache_op(struct ion_client *client,
 			struct ion_handle *handle, void *vaddr,
 			unsigned long len, unsigned int cmd)
diff --git a/include/linux/leds-pm8xxx.h b/include/linux/leds-pm8xxx.h
index 1e672e3..e912585 100644
--- a/include/linux/leds-pm8xxx.h
+++ b/include/linux/leds-pm8xxx.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2010-2012, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2010-2013, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -18,6 +18,10 @@
 
 #define PM8XXX_LEDS_DEV_NAME	"pm8xxx-led"
 
+#define WLED_FIRST_STRING (1 << 2)
+#define WLED_SECOND_STRING (1 << 1)
+#define WLED_THIRD_STRING (1 << 0)
+
 /**
  * enum pm8xxx_leds - PMIC8XXX supported led ids
  * @PM8XXX_ID_LED_KB_LIGHT - keyboard backlight led
@@ -77,7 +81,7 @@
 
 /**
  *  wled_config_data - wled configuration data
- *  @num_strings - number of wled strings supported
+ *  @strings - bitmask of WLED strings supported
  *  @ovp_val - over voltage protection threshold
  *  @boost_curr_lim - boot current limit
  *  @cp_select - high pole capacitance
@@ -86,9 +90,10 @@
  *  @cs_out_en - current sink output enable
  *  @op_fdbck - selection of output as feedback for the boost
  *  @cabc_en - enable cabc for backlight pwm control
+ *
  */
 struct wled_config_data {
-	u8	num_strings;
+	u8	strings;
 	u8	ovp_val;
 	u8	boost_curr_lim;
 	u8	cp_select;
@@ -97,6 +102,11 @@
 	bool	cs_out_en;
 	bool	op_fdbck;
 	bool	cabc_en;
+	bool	sstart_en;
+	bool	max_current_ind;
+	u8 max_three;
+	u8 max_two;
+	u8 max_one;
 };
 
 /**
diff --git a/include/linux/mfd/pm8xxx/pm8921-charger.h b/include/linux/mfd/pm8xxx/pm8921-charger.h
index 785a33a..1c67b1e 100644
--- a/include/linux/mfd/pm8xxx/pm8921-charger.h
+++ b/include/linux/mfd/pm8xxx/pm8921-charger.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2011-2012, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2011-2013, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -188,6 +188,7 @@
 	int				btc_delay_ms;
 	int				btc_panic_if_cant_stop_chg;
 	int				stop_chg_upon_expiry;
+	bool				disable_chg_rmvl_wrkarnd;
 };
 
 enum pm8921_charger_source {
diff --git a/include/linux/mmc/sdhci.h b/include/linux/mmc/sdhci.h
index c5b492b..e9051e1 100644
--- a/include/linux/mmc/sdhci.h
+++ b/include/linux/mmc/sdhci.h
@@ -91,7 +91,6 @@
 	unsigned int quirks2;	/* More deviations from spec. */
 
 #define SDHCI_QUIRK2_HOST_OFF_CARD_ON			(1<<0)
-#define SDHCI_QUIRK2_OWN_CARD_DETECTION			(1<<1)
 
 	int irq;		/* Device IRQ */
 	void __iomem *ioaddr;	/* Mapped address */
diff --git a/include/linux/msm_audio_aac.h b/include/linux/msm_audio_aac.h
index ee71c3e..88024d9 100644
--- a/include/linux/msm_audio_aac.h
+++ b/include/linux/msm_audio_aac.h
@@ -14,6 +14,9 @@
 #define AUDIO_GET_AAC_ENC_CONFIG  _IOR(AUDIO_IOCTL_MAGIC, \
   (AUDIO_MAX_COMMON_IOCTL_NUM+4), struct msm_audio_aac_enc_config)
 
+#define AUDIO_SET_AAC_MIX_CONFIG  _IOR(AUDIO_IOCTL_MAGIC, \
+(AUDIO_MAX_COMMON_IOCTL_NUM+5), unsigned)
+
 #define AUDIO_AAC_FORMAT_ADTS		-1
 #define	AUDIO_AAC_FORMAT_RAW		0x0000
 #define	AUDIO_AAC_FORMAT_PSUEDO_RAW	0x0001
diff --git a/include/linux/msm_ion.h b/include/linux/msm_ion.h
index 409bcc8..2593154 100644
--- a/include/linux/msm_ion.h
+++ b/include/linux/msm_ion.h
@@ -281,6 +281,20 @@
 {
 	return -ENODEV;
 }
+
+static inline int msm_ion_secure_buffer(struct ion_client *client,
+					struct ion_handle *handle,
+					enum cp_mem_usage usage,
+					int flags)
+{
+	return -ENODEV;
+}
+
+static inline int msm_ion_unsecure_buffer(struct ion_client *client,
+					struct ion_handle *handle)
+{
+	return -ENODEV;
+}
 #endif /* CONFIG_ION */
 
 #endif /* __KERNEL */
@@ -305,19 +319,6 @@
 	unsigned int length;
 };
 
-/* struct ion_flag_data - information about flags for this buffer
- *
- * @handle:	handle to get flags from
- * @flags:	flags of this handle
- *
- * Takes handle as an input and outputs the flags from the handle
- * in the flag field.
- */
-struct ion_flag_data {
-	struct ion_handle *handle;
-	unsigned long flags;
-};
-
 #define ION_IOC_MSM_MAGIC 'M'
 
 /**
@@ -342,13 +343,4 @@
 #define ION_IOC_CLEAN_INV_CACHES	_IOWR(ION_IOC_MSM_MAGIC, 2, \
 						struct ion_flush_data)
 
-/**
- * DOC: ION_IOC_GET_FLAGS - get the flags of the handle
- *
- * Gets the flags of the current handle which indicate cachability,
- * secure state etc.
- */
-#define ION_IOC_GET_FLAGS		_IOWR(ION_IOC_MSM_MAGIC, 3, \
-						struct ion_flag_data)
-
 #endif
diff --git a/include/linux/msm_ipa.h b/include/linux/msm_ipa.h
index 1b869b1..30bf4f2 100644
--- a/include/linux/msm_ipa.h
+++ b/include/linux/msm_ipa.h
@@ -155,6 +155,10 @@
  * wlan client normal: wlan client moved out of power save
  * sw routing enable: ipa routing is disabled
  * sw routing disable: ipa routing is enabled
+ * wlan ap connect: wlan AP (access point) is up
+ * wlan ap disconnect: wlan AP (access point) is down
+ * wlan sta connect: wlan STA (station) is up
+ * wlan sta disconnect: wlan STA (station) is down
  */
 enum ipa_wlan_event {
 	WLAN_CLIENT_CONNECT,
@@ -163,6 +167,10 @@
 	WLAN_CLIENT_NORMAL_MODE,
 	SW_ROUTING_ENABLE,
 	SW_ROUTING_DISABLE,
+	WLAN_AP_CONNECT,
+	WLAN_AP_DISCONNECT,
+	WLAN_STA_CONNECT,
+	WLAN_STA_DISCONNECT,
 };
 
 
@@ -761,4 +769,87 @@
 				IPA_IOCTL_PULL_MSG, \
 				struct ipa_msg_meta *)
 
+/*
+ * unique magic number of the Tethering bridge ioctls
+ */
+#define TETH_BRIDGE_IOC_MAGIC 0xCE
+
+/*
+ * Ioctls supported by Tethering bridge driver
+ */
+#define TETH_BRIDGE_IOCTL_SET_BRIDGE_MODE	0
+#define TETH_BRIDGE_IOCTL_SET_AGGR_PARAMS	1
+#define TETH_BRIDGE_IOCTL_GET_AGGR_PARAMS	2
+#define TETH_BRIDGE_IOCTL_GET_AGGR_CAPABILITIES	3
+#define TETH_BRIDGE_IOCTL_MAX			4
+
+
+/**
+ * enum teth_link_protocol_type - link protocol (IP / Ethernet)
+ */
+enum teth_link_protocol_type {
+	TETH_LINK_PROTOCOL_IP,
+	TETH_LINK_PROTOCOL_ETHERNET,
+	TETH_LINK_PROTOCOL_MAX,
+};
+
+/**
+ * enum teth_aggr_protocol_type - Aggregation protocol (MBIM / TLP)
+ */
+enum teth_aggr_protocol_type {
+	TETH_AGGR_PROTOCOL_NONE,
+	TETH_AGGR_PROTOCOL_MBIM,
+	TETH_AGGR_PROTOCOL_TLP,
+	TETH_AGGR_PROTOCOL_MAX,
+};
+
+/**
+ * struct teth_aggr_params_link - Aggregation parameters for uplink/downlink
+ * @aggr_prot:			Aggregation protocol (MBIM / TLP)
+ * @max_transfer_size_byte:	Maximum size of an aggregated packet in bytes.
+ *				Default value is 16*1024.
+ * @max_datagrams:		Maximum number of IP packets in an aggregated
+ *				packet. Default value is 16.
+ */
+struct teth_aggr_params_link {
+	enum teth_aggr_protocol_type aggr_prot;
+	uint32_t max_transfer_size_byte;
+	uint32_t max_datagrams;
+};
+
+
+/**
+ * struct teth_aggr_params - Aggregation parameters
+ * @ul:	Uplink parameters
+ * @dl: Downlink parameters
+ */
+struct teth_aggr_params {
+	struct teth_aggr_params_link ul;
+	struct teth_aggr_params_link dl;
+};
+
+/**
+ * struct teth_aggr_capabilities - Aggregation capabilities
+ * @num_protocols:		Number of protocols described in the array
+ * @prot_caps[]:		Array of aggregation capabilities per protocol
+ */
+struct teth_aggr_capabilities {
+	uint16_t num_protocols;
+	struct teth_aggr_params_link prot_caps[0];
+};
+
+
+#define TETH_BRIDGE_IOC_SET_BRIDGE_MODE _IOW(TETH_BRIDGE_IOC_MAGIC, \
+				TETH_BRIDGE_IOCTL_SET_BRIDGE_MODE, \
+				enum teth_link_protocol_type)
+#define TETH_BRIDGE_IOC_SET_AGGR_PARAMS _IOW(TETH_BRIDGE_IOC_MAGIC, \
+				TETH_BRIDGE_IOCTL_SET_AGGR_PARAMS, \
+				struct teth_aggr_params *)
+#define TETH_BRIDGE_IOC_GET_AGGR_PARAMS _IOR(TETH_BRIDGE_IOC_MAGIC, \
+				TETH_BRIDGE_IOCTL_GET_AGGR_PARAMS, \
+				struct teth_aggr_params *)
+#define TETH_BRIDGE_IOC_GET_AGGR_CAPABILITIES _IOWR(TETH_BRIDGE_IOC_MAGIC, \
+				TETH_BRIDGE_IOCTL_GET_AGGR_CAPABILITIES, \
+				struct teth_aggr_capabilities *)
+
 #endif /* _MSM_IPA_H_ */
diff --git a/include/linux/msm_mdp.h b/include/linux/msm_mdp.h
index bc35d14..45bc0ea 100644
--- a/include/linux/msm_mdp.h
+++ b/include/linux/msm_mdp.h
@@ -564,6 +564,15 @@
 	mdp_op_max,
 };
 
+enum {
+	WB_FORMAT_NV12,
+	WB_FORMAT_RGB_565,
+	WB_FORMAT_RGB_888,
+	WB_FORMAT_xRGB_8888,
+	WB_FORMAT_ARGB_8888,
+	WB_FORMAT_ARGB_8888_INPUT_ALPHA /* not yet supported */
+};
+
 struct msmfb_mdp_pp {
 	uint32_t op;
 	union {
@@ -585,6 +594,7 @@
 	metadata_op_base_blend,
 	metadata_op_frame_rate,
 	metadata_op_vic,
+	metadata_op_wb_format,
 	metadata_op_max
 };
 
@@ -592,11 +602,17 @@
 	uint32_t is_premultiplied;
 };
 
+struct mdp_mixer_cfg {
+	uint32_t writeback_format;
+	uint32_t alpha;
+};
+
 struct msmfb_metadata {
 	uint32_t op;
 	uint32_t flags;
 	union {
 		struct mdp_blend_cfg blend_cfg;
+		struct mdp_mixer_cfg mixer_cfg;
 		uint32_t panel_frame_rate;
 		uint32_t video_info_code;
 	} data;
@@ -646,8 +662,13 @@
 	ROTATOR_SUBSYSTEM_ID,
 };
 
+enum {
+	MDP_IOMMU_DOMAIN_CP,
+	MDP_IOMMU_DOMAIN_NS,
+};
+
 #ifdef __KERNEL__
-int msm_fb_get_iommu_domain(void);
+int msm_fb_get_iommu_domain(struct fb_info *info, int domain);
 /* get the framebuffer physical address information */
 int get_fb_phys_info(unsigned long *start, unsigned long *len, int fb_num,
 	int subsys_id);
@@ -660,6 +681,7 @@
 		struct msmfb_data *data);
 int msm_fb_writeback_stop(struct fb_info *info);
 int msm_fb_writeback_terminate(struct fb_info *info);
+int msm_fb_writeback_set_secure(struct fb_info *info, int enable);
 #endif
 
 #endif /*_MSM_MDP_H_*/
diff --git a/include/linux/nl80211.h b/include/linux/nl80211.h
index c6ee4f0..0683296 100644
--- a/include/linux/nl80211.h
+++ b/include/linux/nl80211.h
@@ -365,8 +365,8 @@
  *	requests to connect to a specified network but without separating
  *	auth and assoc steps. For this, you need to specify the SSID in a
  *	%NL80211_ATTR_SSID attribute, and can optionally specify the association
- *	IEs in %NL80211_ATTR_IE, %NL80211_ATTR_AUTH_TYPE, %NL80211_ATTR_MAC,
- *	%NL80211_ATTR_WIPHY_FREQ, %NL80211_ATTR_CONTROL_PORT,
+ *	IEs in %NL80211_ATTR_IE, %NL80211_ATTR_AUTH_TYPE, %NL80211_ATTR_USE_MFP,
+ *	%NL80211_ATTR_MAC, %NL80211_ATTR_WIPHY_FREQ, %NL80211_ATTR_CONTROL_PORT,
  *	%NL80211_ATTR_CONTROL_PORT_ETHERTYPE and
  *	%NL80211_ATTR_CONTROL_PORT_NO_ENCRYPT.
  *	Background scan period can optionally be
@@ -906,7 +906,7 @@
  * @NL80211_ATTR_USE_MFP: Whether management frame protection (IEEE 802.11w) is
  *	used for the association (&enum nl80211_mfp, represented as a u32);
  *	this attribute can be used
- *	with %NL80211_CMD_ASSOCIATE request
+ *	with %NL80211_CMD_ASSOCIATE and %NL80211_CMD_CONNECT requests
  *
  * @NL80211_ATTR_STA_FLAGS2: Attribute containing a
  *	&struct nl80211_sta_flag_update.
diff --git a/include/linux/of_coresight.h b/include/linux/of_coresight.h
index 6a5e4d4..0943dda 100644
--- a/include/linux/of_coresight.h
+++ b/include/linux/of_coresight.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -16,12 +16,19 @@
 #ifdef CONFIG_OF
 extern struct coresight_platform_data *of_get_coresight_platform_data(
 				struct device *dev, struct device_node *node);
+extern struct coresight_cti_data *of_get_coresight_cti_data(
+				struct device *dev, struct device_node *node);
 #else
 static inline struct coresight_platform_data *of_get_coresight_platform_data(
 				struct device *dev, struct device_node *node)
 {
 	return NULL;
 }
+static inline struct coresight_cti_data *of_get_coresight_cti_data(
+				struct device *dev, struct device_node *node)
+{
+	return NULL;
+}
 #endif
 
 #endif
diff --git a/drivers/platform/msm/ipa/a2_service.h b/include/linux/platform_data/qcom_ssm.h
similarity index 61%
rename from drivers/platform/msm/ipa/a2_service.h
rename to include/linux/platform_data/qcom_ssm.h
index 80885da..03ac67a 100644
--- a/drivers/platform/msm/ipa/a2_service.h
+++ b/include/linux/platform_data/qcom_ssm.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2013, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -10,15 +10,12 @@
  * GNU General Public License for more details.
  */
 
-#ifndef _A2_SERVICE_H_
-#define _A2_SERVICE_H_
+#ifndef __QCOM_SSM_H_
+#define __QCOM_SSM_H_
 
-int a2_mux_initialize(void);
+struct ssm_platform_data {
+	bool need_key_exchg;
+	const char *channel_name;
+};
 
-int a2_mux_close(void);
-
-int a2_mux_open_port(int wwan_logical_channel_id, void *rx_cb,
-		void *tx_complete_cb);
-
-#endif /* _A2_SERVICE_H_ */
-
+#endif /* __QCOM_SSM_H_ */
diff --git a/include/linux/qpnp/qpnp-adc.h b/include/linux/qpnp/qpnp-adc.h
index 1849cee..05d75ce 100644
--- a/include/linux/qpnp/qpnp-adc.h
+++ b/include/linux/qpnp/qpnp-adc.h
@@ -1203,20 +1203,15 @@
 static inline int32_t qpnp_adc_scale_therm_pu1(int32_t adc_code,
 			const struct qpnp_adc_properties *adc_prop,
 			const struct qpnp_vadc_chan_properties *chan_prop,
-			struct qpnp_vadc_result *chan_rslt);
+			struct qpnp_vadc_result *chan_rslt)
 { return -ENXIO; }
 static inline int32_t qpnp_adc_scale_therm_pu2(int32_t adc_code,
 			const struct qpnp_adc_properties *adc_prop,
 			const struct qpnp_vadc_chan_properties *chan_prop,
-			struct qpnp_vadc_result *chan_rslt);
+			struct qpnp_vadc_result *chan_rslt)
 { return -ENXIO; }
 static inline int32_t qpnp_vadc_is_ready(void)
 { return -ENXIO; }
-static inline int32_t qpnp_adc_scale_default(int32_t adc_code,
-			const struct qpnp_adc_properties *adc_prop,
-			const struct qpnp_adc_chan_properties *chan_prop,
-			struct qpnp_adc_chan_result *chan_rslt)
-{ return -ENXIO; }
 static inline int32_t qpnp_get_vadc_gain_and_offset(
 			struct qpnp_vadc_linear_graph *param,
 			enum qpnp_adc_calib_type calib_type)
diff --git a/include/linux/regulator/krait-regulator.h b/include/linux/regulator/krait-regulator.h
index 836f9d6..b784531 100644
--- a/include/linux/regulator/krait-regulator.h
+++ b/include/linux/regulator/krait-regulator.h
@@ -13,7 +13,8 @@
 #ifndef __KRAIT_REGULATOR_H__
 #define __KRAIT_REGULATOR_H__
 
-#define KRAIT_REGULATOR_DRIVER_NAME "krait-power-regulator"
+#define KRAIT_REGULATOR_DRIVER_NAME	"krait-power-regulator"
+#define KRAIT_PDN_DRIVER_NAME		"krait-pdn"
 
 /**
  * krait_power_init - driver initialization function
diff --git a/include/linux/uhid.h b/include/linux/uhid.h
new file mode 100644
index 0000000..16b786a
--- /dev/null
+++ b/include/linux/uhid.h
@@ -0,0 +1,33 @@
+#ifndef __UHID_H_
+#define __UHID_H_
+
+/*
+ * User-space I/O driver support for HID subsystem
+ * Copyright (c) 2012 David Herrmann
+ */
+
+/*
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ */
+
+/*
+ * Public header for user-space communication. We try to keep every structure
+ * aligned but to be safe we also use __attribute__((__packed__)). Therefore,
+ * the communication should be ABI compatible even between architectures.
+ */
+
+#include <linux/input.h>
+#include <linux/types.h>
+
+enum uhid_event_type {
+	UHID_DUMMY,
+};
+
+struct uhid_event {
+	__u32 type;
+} __attribute__((__packed__));
+
+#endif /* __UHID_H_ */
diff --git a/include/linux/usb/msm_hsusb.h b/include/linux/usb/msm_hsusb.h
index b0b718f..e249953 100644
--- a/include/linux/usb/msm_hsusb.h
+++ b/include/linux/usb/msm_hsusb.h
@@ -304,6 +304,7 @@
 	struct msm_otg_platform_data *pdata;
 	int irq;
 	int async_irq;
+	struct clk *xo_clk;
 	struct clk *clk;
 	struct clk *pclk;
 	struct clk *phy_reset_clk;
diff --git a/include/linux/wcnss_wlan.h b/include/linux/wcnss_wlan.h
index 2b9a7c7..2319c48 100644
--- a/include/linux/wcnss_wlan.h
+++ b/include/linux/wcnss_wlan.h
@@ -65,6 +65,7 @@
 void wcnss_resume_notify(void);
 void wcnss_riva_log_debug_regs(void);
 void wcnss_pronto_log_debug_regs(void);
+int wcnss_cold_boot_done(void);
 
 #define wcnss_wlan_get_drvdata(dev) dev_get_drvdata(dev)
 #define wcnss_wlan_set_drvdata(dev, data) dev_set_drvdata((dev), (data))
diff --git a/include/media/msmb_pproc.h b/include/media/msmb_pproc.h
index b003f99..6bac1d6 100644
--- a/include/media/msmb_pproc.h
+++ b/include/media/msmb_pproc.h
@@ -85,6 +85,8 @@
 	uint32_t *cpp_cmd_msg;
 	int src_fd;
 	int dst_fd;
+	struct ion_handle *src_ion_handle;
+	struct ion_handle *dest_ion_handle;
 };
 
 struct msm_ver_num_info {
diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h
index 5e32ff7..6666c69 100644
--- a/include/net/cfg80211.h
+++ b/include/net/cfg80211.h
@@ -1199,6 +1199,7 @@
  * @ie: IEs for association request
  * @ie_len: Length of assoc_ie in octets
  * @privacy: indicates whether privacy-enabled APs should be used
+ * @mfp: indicates whether management frame protection is used
  * @crypto: crypto settings
  * @key_len: length of WEP key for shared key authentication
  * @key_idx: index of WEP key for shared key authentication
@@ -1219,6 +1220,7 @@
 	u8 *ie;
 	size_t ie_len;
 	bool privacy;
+	enum nl80211_mfp mfp;
 	struct cfg80211_crypto_settings crypto;
 	const u8 *key;
 	u8 key_len, key_idx;
diff --git a/include/sound/apr_audio.h b/include/sound/apr_audio.h
index de41c6e..40b0e1e 100644
--- a/include/sound/apr_audio.h
+++ b/include/sound/apr_audio.h
@@ -1,6 +1,6 @@
 /*
  *
- * Copyright (c) 2010-2012, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2010-2013, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -1513,6 +1513,17 @@
 	struct asm_dual_mono channel_map;
 } __packed;
 
+#define ASM_PARAM_ID_AAC_STEREO_MIX_COEFF_SELECTION_FLAG        0x00010DD8
+
+/* Structure for AAC decoder stereo coefficient setting. */
+
+struct asm_aac_stereo_mix_coeff_selection_param {
+	struct apr_hdr				hdr;
+	u32					param_id;
+	u32					param_size;
+	u32					aac_stereo_mix_coeff_flag;
+} __packed;
+
 #define ASM_ENCDEC_DEC_CHAN_MAP				 0x00010D82
 struct asm_stream_cmd_encdec_channelmap {
 	struct apr_hdr hdr;
diff --git a/include/sound/q6asm-v2.h b/include/sound/q6asm-v2.h
index dc30cd6..5744a43 100644
--- a/include/sound/q6asm-v2.h
+++ b/include/sound/q6asm-v2.h
@@ -249,6 +249,8 @@
 int q6asm_cfg_dual_mono_aac(struct audio_client *ac,
 			uint16_t sce_left, uint16_t sce_right);
 
+int q6asm_cfg_aac_sel_mix_coef(struct audio_client *ac, uint32_t mix_coeff);
+
 int q6asm_enc_cfg_blk_qcelp(struct audio_client *ac, uint32_t frames_per_buf,
 		uint16_t min_rate, uint16_t max_rate,
 		uint16_t reduced_rate_level, uint16_t rate_modulation_cmd);
diff --git a/include/sound/q6asm.h b/include/sound/q6asm.h
index 42c9120..406407d 100644
--- a/include/sound/q6asm.h
+++ b/include/sound/q6asm.h
@@ -86,18 +86,18 @@
 
 #define SESSION_MAX	0x08
 
-#define SOFT_PAUSE_PERIOD       30   /* ramp up/down for 30ms    */
+#define SOFT_PAUSE_PERIOD       30   /* ramp up/down for 30ms */
 #define SOFT_PAUSE_STEP_LINEAR  0    /* Step value 0ms or 0us */
-#define SOFT_PAUSE_STEP         2000 /* Step value 2000ms or 2000us */
+#define SOFT_PAUSE_STEP         0    /* Step value 0ms or 0us */
 enum {
 	SOFT_PAUSE_CURVE_LINEAR = 0,
 	SOFT_PAUSE_CURVE_EXP,
 	SOFT_PAUSE_CURVE_LOG,
 };
 
-#define SOFT_VOLUME_PERIOD       30   /* ramp up/down for 30ms    */
+#define SOFT_VOLUME_PERIOD       30   /* ramp up/down for 30ms */
 #define SOFT_VOLUME_STEP_LINEAR  0    /* Step value 0ms or 0us */
-#define SOFT_VOLUME_STEP         2000 /* Step value 2000ms or 2000us */
+#define SOFT_VOLUME_STEP         0    /* Step value 0ms or 0us */
 enum {
 	SOFT_VOLUME_CURVE_LINEAR = 0,
 	SOFT_VOLUME_CURVE_EXP,
@@ -273,6 +273,8 @@
 int q6asm_cfg_dual_mono_aac(struct audio_client *ac,
 			uint16_t sce_left, uint16_t sce_right);
 
+int q6asm_cfg_aac_sel_mix_coef(struct audio_client *ac, uint32_t mix_coeff);
+
 int q6asm_set_encdec_chan_map(struct audio_client *ac,
 			uint32_t num_channels);
 
diff --git a/kernel/sys.c b/kernel/sys.c
index e7006eb..39791be 100644
--- a/kernel/sys.c
+++ b/kernel/sys.c
@@ -1179,15 +1179,16 @@
  * Work around broken programs that cannot handle "Linux 3.0".
  * Instead we map 3.x to 2.6.40+x, so e.g. 3.0 would be 2.6.40
  */
-static int override_release(char __user *release, int len)
+static int override_release(char __user *release, size_t len)
 {
 	int ret = 0;
-	char buf[65];
 
 	if (current->personality & UNAME26) {
-		char *rest = UTS_RELEASE;
+		const char *rest = UTS_RELEASE;
+		char buf[65] = { 0 };
 		int ndots = 0;
 		unsigned v;
+		size_t copy;
 
 		while (*rest) {
 			if (*rest == '.' && ++ndots >= 3)
@@ -1197,8 +1198,9 @@
 			rest++;
 		}
 		v = ((LINUX_VERSION_CODE >> 8) & 0xff) + 40;
-		snprintf(buf, len, "2.6.%u%s", v, rest);
-		ret = copy_to_user(release, buf, len);
+		copy = min(sizeof(buf), max_t(size_t, 1, len));
+		copy = scnprintf(buf, copy, "2.6.%u%s", v, rest);
+		ret = copy_to_user(release, buf, copy + 1);
 	}
 	return ret;
 }
diff --git a/lib/vsprintf.c b/lib/vsprintf.c
index f5dfe0c..08b5ae7 100644
--- a/lib/vsprintf.c
+++ b/lib/vsprintf.c
@@ -857,6 +857,7 @@
  *       correctness of the format string and va_list arguments.
  * - 'K' For a kernel pointer that should be hidden from unprivileged users
  * - 'NF' For a netdev_features_t
+ * - 'a' For a phys_addr_t type and its derivative types (passed by reference)
  *
  * Note: The difference between 'S' and 'F' is that on ia64 and ppc64
  * function pointers are really function descriptors, which contain a
@@ -941,6 +942,12 @@
 			return netdev_feature_string(buf, end, ptr, spec);
 		}
 		break;
+	case 'a':
+		spec.flags |= SPECIAL | SMALL | ZEROPAD;
+		spec.field_width = sizeof(phys_addr_t) * 2 + 2;
+		spec.base = 16;
+		return number(buf, end,
+			      (unsigned long long) *((phys_addr_t *)ptr), spec);
 	}
 	spec.flags |= SMALL;
 	if (spec.field_width == -1) {
diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c
index 7ecbc70..9962c88 100644
--- a/net/bluetooth/hci_core.c
+++ b/net/bluetooth/hci_core.c
@@ -1474,6 +1474,8 @@
 	hdev->sniff_max_interval = 800;
 	hdev->sniff_min_interval = 80;
 
+	set_bit(HCI_SETUP, &hdev->flags);
+
 	tasklet_init(&hdev->cmd_task, hci_cmd_task, (unsigned long) hdev);
 	tasklet_init(&hdev->rx_task, hci_rx_task, (unsigned long) hdev);
 	tasklet_init(&hdev->tx_task, hci_tx_task, (unsigned long) hdev);
@@ -1542,7 +1544,6 @@
 	}
 
 	set_bit(HCI_AUTO_OFF, &hdev->flags);
-	set_bit(HCI_SETUP, &hdev->flags);
 	queue_work(hdev->workqueue, &hdev->power_on);
 
 	hci_notify(hdev, HCI_DEV_REG);
diff --git a/net/bluetooth/mgmt.c b/net/bluetooth/mgmt.c
index a1f2955..8658b94 100644
--- a/net/bluetooth/mgmt.c
+++ b/net/bluetooth/mgmt.c
@@ -383,23 +383,30 @@
 		err = cmd_status(sk, index, MGMT_OP_SET_POWERED, EBUSY);
 		goto failed;
 	}
+	/* Avoid queuing power_on/off while setup is in progress via
+	 * hci_register_dev
+	 */
+	if (!test_bit(HCI_SETUP, &hdev->flags)) {
+		cmd = mgmt_pending_add(sk, MGMT_OP_SET_POWERED, index, data,
+									len);
+		if (!cmd) {
+			err = -ENOMEM;
+			goto failed;
+		}
 
-	cmd = mgmt_pending_add(sk, MGMT_OP_SET_POWERED, index, data, len);
-	if (!cmd) {
-		err = -ENOMEM;
+		hci_dev_unlock_bh(hdev);
+
+		if (cp->val)
+			queue_work(hdev->workqueue, &hdev->power_on);
+		else
+			queue_work(hdev->workqueue, &hdev->power_off);
+
+		err = 0;
+		hci_dev_put(hdev);
+	} else {
+		err = cmd_status(sk, index, MGMT_OP_SET_POWERED, ENODEV);
 		goto failed;
 	}
-
-	hci_dev_unlock_bh(hdev);
-
-	if (cp->val)
-		queue_work(hdev->workqueue, &hdev->power_on);
-	else
-		queue_work(hdev->workqueue, &hdev->power_off);
-
-	err = 0;
-	hci_dev_put(hdev);
-
 	return err;
 
 failed:
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
index 5c2e805..1ccc69e 100644
--- a/net/wireless/nl80211.c
+++ b/net/wireless/nl80211.c
@@ -5173,6 +5173,15 @@
 		connect.ie_len = nla_len(info->attrs[NL80211_ATTR_IE]);
 	}
 
+	if (info->attrs[NL80211_ATTR_USE_MFP]) {
+		connect.mfp = nla_get_u32(info->attrs[NL80211_ATTR_USE_MFP]);
+		if (connect.mfp != NL80211_MFP_REQUIRED &&
+		    connect.mfp != NL80211_MFP_NO)
+			return -EINVAL;
+	} else {
+		connect.mfp = NL80211_MFP_NO;
+	}
+
 	if (info->attrs[NL80211_ATTR_WIPHY_FREQ]) {
 		connect.channel =
 			ieee80211_get_channel(wiphy,
diff --git a/net/wireless/sme.c b/net/wireless/sme.c
index bbbed73..ab91446 100644
--- a/net/wireless/sme.c
+++ b/net/wireless/sme.c
@@ -190,7 +190,8 @@
 					    prev_bssid,
 					    params->ssid, params->ssid_len,
 					    params->ie, params->ie_len,
-					    false, &params->crypto,
+					    params->mfp != NL80211_MFP_NO,
+					    &params->crypto,
 					    params->flags, &params->ht_capa,
 					    &params->ht_capa_mask);
 		if (err)
diff --git a/scripts/build-all.py b/scripts/build-all.py
index f5048e0..4789af7 100755
--- a/scripts/build-all.py
+++ b/scripts/build-all.py
@@ -1,6 +1,6 @@
 #! /usr/bin/env python
 
-# Copyright (c) 2009-2011, The Linux Foundation. All rights reserved.
+# Copyright (c) 2009-2013, The Linux Foundation. All rights reserved.
 #
 # Redistribution and use in source and binary forms, with or without
 # modification, are permitted provided that the following conditions are met:
@@ -88,7 +88,6 @@
         r'[fm]sm[0-9]*_defconfig',
         r'apq*_defconfig',
         r'qsd*_defconfig',
-        r'omap2*_defconfig',
         )
     for p in arch_pats:
         for n in glob.glob('arch/arm/configs/' + p):
diff --git a/sound/soc/codecs/msm8x10-wcd.c b/sound/soc/codecs/msm8x10-wcd.c
index 4bcea07..c8647fb1 100644
--- a/sound/soc/codecs/msm8x10-wcd.c
+++ b/sound/soc/codecs/msm8x10-wcd.c
@@ -50,9 +50,9 @@
 #define MSM8X10_WCD_I2S_MASTER_MODE_MASK	0x08
 #define MSM8X10_DINO_CODEC_BASE_ADDR		0xFE043000
 
-#define MAX_MSM8X10_WCD_DEVICE	2
+#define MAX_MSM8X10_WCD_DEVICE	4
 #define CODEC_DT_MAX_PROP_SIZE	40
-#define MSM8X10_WCD_I2C_GSBI_SLAVE_ID "2-000d"
+#define MSM8X10_WCD_I2C_GSBI_SLAVE_ID "1-000d"
 
 enum {
 	MSM8X10_WCD_I2C_TOP_LEVEL = 0,
@@ -88,6 +88,7 @@
 	IIR2,
 	IIR_MAX,
 };
+
 /* Codec supports 5 bands */
 enum {
 	BAND1 = 0,
@@ -119,7 +120,6 @@
 	struct wcd9xxx_mbhc mbhc;
 };
 
-
 static unsigned short rx_digital_gain_reg[] = {
 	MSM8X10_WCD_A_CDC_RX1_VOL_CTL_B2_CTL,
 	MSM8X10_WCD_A_CDC_RX2_VOL_CTL_B2_CTL,
@@ -171,7 +171,7 @@
 	return rtn;
 }
 
-static int msm8x10_wcd_abh_write_device(u16 reg, u8 *value, u32 bytes)
+static int msm8x10_wcd_abh_write_device(u16 reg, unsigned int *value, u32 bytes)
 {
 	u32 temp = ((u32)(*value)) & 0x000000FF;
 	u32 offset = (((u32)(reg)) ^ 0x00000400) & 0x00000FFF;
@@ -179,10 +179,10 @@
 	return 0;
 }
 
-static int msm8x10_wcd_abh_read_device(u16 reg, u32 bytes, u8 *value)
+static int msm8x10_wcd_abh_read_device(u16 reg, u32 bytes, unsigned int *value)
 {
 	u32 offset = (((u32)(reg)) ^ 0x00000400) & 0x00000FFF;
-	*value = (u8)ioread32(ioremap(MSM8X10_DINO_CODEC_BASE_ADDR +
+	*value = ioread32(ioremap(MSM8X10_DINO_CODEC_BASE_ADDR +
 				      offset, 4));
 	return 0;
 }
@@ -194,10 +194,10 @@
 	int ret;
 	u8 reg_addr = 0;
 	u8 data[bytes + 1];
-	struct msm8x10_wcd_i2c *msm8x10_wcd;
+	struct msm8x10_wcd_i2c *msm8x10_wcd = NULL;
 
 	ret = get_i2c_msm8x10_wcd_device_info(reg, &msm8x10_wcd);
-	if (!ret) {
+	if (ret) {
 		pr_err("%s: Invalid register address\n", __func__);
 		return ret;
 	}
@@ -219,7 +219,7 @@
 	/* Try again if the write fails */
 	if (ret != 1) {
 		ret = i2c_transfer(msm8x10_wcd->client->adapter,
-						msm8x10_wcd->xfer_msg, 1);
+				   msm8x10_wcd->xfer_msg, 1);
 		if (ret != 1) {
 			pr_err("failed to write the device\n");
 			return ret;
@@ -235,11 +235,11 @@
 	struct i2c_msg *msg;
 	int ret = 0;
 	u8 reg_addr = 0;
-	struct msm8x10_wcd_i2c *msm8x10_wcd;
+	struct msm8x10_wcd_i2c *msm8x10_wcd = NULL;
 	u8 i = 0;
 
 	ret = get_i2c_msm8x10_wcd_device_info(reg, &msm8x10_wcd);
-	if (!ret) {
+	if (ret) {
 		pr_err("%s: Invalid register address\n", __func__);
 		return ret;
 	}
@@ -256,7 +256,6 @@
 		msg->len = 1;
 		msg->flags = 0;
 		msg->buf = &reg_addr;
-
 		msg = &msm8x10_wcd->xfer_msg[1];
 		msg->addr = msm8x10_wcd->client->addr;
 		msg->len = 1;
@@ -275,38 +274,45 @@
 			}
 		}
 	}
+	pr_debug("%s: Reg 0x%x = 0x%x\n", __func__, reg, *dest);
 	return 0;
 }
 
-static int msm8x10_wcd_reg_read(struct msm8x10_wcd *msm8x10_wcd, u16 reg)
+int msm8x10_wcd_i2c_read(unsigned short reg, int bytes, void *dest)
 {
-	u8 val;
+	return msm8x10_wcd_i2c_read_device(reg, bytes, dest);
+}
+
+int msm8x10_wcd_i2c_write(unsigned short reg, int bytes, void *src)
+{
+	return msm8x10_wcd_i2c_write_device(reg, src, bytes);
+}
+
+static int msm8x10_wcd_reg_read(struct msm8x10_wcd *msm8x10_wcd,
+				u16 reg, unsigned int *val)
+{
 	int ret = -EINVAL;
 
 	/* check if use I2C interface for Helicon or AHB for Dino */
 	mutex_lock(&msm8x10_wcd->io_lock);
 	if (MSM8X10_WCD_IS_HELICON_REG(reg))
-		ret = msm8x10_wcd_i2c_read_device(reg, 1, &val);
+		ret = msm8x10_wcd_i2c_read(reg, 1, val);
 	else if (MSM8X10_WCD_IS_DINO_REG(reg))
-		ret = msm8x10_wcd_abh_read_device(reg, 1, &val);
+		ret = msm8x10_wcd_abh_read_device(reg, 1, val);
 	mutex_unlock(&msm8x10_wcd->io_lock);
-
-	if (ret < 0)
-		return ret;
-	else
-		return val;
+	return ret;
 }
 
 
 static int msm8x10_wcd_reg_write(struct msm8x10_wcd *msm8x10_wcd, u16  reg,
-				 u8 val)
+				 unsigned int val)
 {
 	int ret = -EINVAL;
 
 	/* check if use I2C interface for Helicon or AHB for Dino */
 	mutex_lock(&msm8x10_wcd->io_lock);
 	if (MSM8X10_WCD_IS_HELICON_REG(reg))
-		ret = msm8x10_wcd_i2c_write_device(reg, &val, 1);
+		ret = msm8x10_wcd_i2c_write(reg, 1, &val);
 	else if (MSM8X10_WCD_IS_DINO_REG(reg))
 		ret = msm8x10_wcd_abh_write_device(reg, &val, 1);
 	mutex_unlock(&msm8x10_wcd->io_lock);
@@ -331,12 +337,13 @@
 	return rtn;
 }
 
-static int msm8x10_wcd_volatile(struct snd_soc_codec *ssc, unsigned int reg)
+static int msm8x10_wcd_volatile(struct snd_soc_codec *codec, unsigned int reg)
 {
 	/*
 	 * Registers lower than 0x100 are top level registers which can be
 	 * written by the Taiko core driver.
 	 */
+	dev_dbg(codec->dev, "%s: reg 0x%x\n", __func__, reg);
 
 	if ((reg >= MSM8X10_WCD_A_CDC_MBHC_EN_CTL) || (reg < 0x100))
 		return 1;
@@ -373,7 +380,7 @@
 			     unsigned int value)
 {
 	int ret;
-
+	dev_dbg(codec->dev, "%s: Write from reg 0x%x\n", __func__, reg);
 	if (reg == SND_SOC_NOPM)
 		return 0;
 
@@ -395,6 +402,7 @@
 	unsigned int val;
 	int ret;
 
+	dev_dbg(codec->dev, "%s: Read from reg 0x%x\n", __func__, reg);
 	if (reg == SND_SOC_NOPM)
 		return 0;
 
@@ -411,7 +419,7 @@
 				reg, ret);
 	}
 
-	val = msm8x10_wcd_reg_read(codec->control_data, reg);
+	ret = msm8x10_wcd_reg_read(codec->control_data, reg, &val);
 	return val;
 }
 
@@ -431,7 +439,7 @@
 
 	if (!regnode) {
 		dev_err(dev, "Looking up %s property in node %s failed",
-				prop_name, dev->of_node->full_name);
+			prop_name, dev->of_node->full_name);
 		return -ENODEV;
 	}
 	vreg->name = vreg_name;
@@ -442,7 +450,7 @@
 
 	if (!prop || (len != (2 * sizeof(__be32)))) {
 		dev_err(dev, "%s %s property\n",
-				prop ? "invalid format" : "no", prop_name);
+			prop ? "invalid format" : "no", prop_name);
 		return -ENODEV;
 	} else {
 		vreg->min_uV = be32_to_cpup(&prop[0]);
@@ -450,18 +458,18 @@
 	}
 
 	snprintf(prop_name, CODEC_DT_MAX_PROP_SIZE,
-			"qcom,%s-current", vreg_name);
+		"qcom,%s-current", vreg_name);
 
 	ret = of_property_read_u32(dev->of_node, prop_name, &prop_val);
 	if (ret) {
 		dev_err(dev, "Looking up %s property in node %s failed",
-				prop_name, dev->of_node->full_name);
+			prop_name, dev->of_node->full_name);
 		return -ENODEV;
 	}
 	vreg->optimum_uA = prop_val;
 
 	dev_info(dev, "%s: vol=[%d %d]uV, curr=[%d]uA\n", vreg->name,
-		vreg->min_uV, vreg->max_uV, vreg->optimum_uA);
+		 vreg->min_uV, vreg->max_uV, vreg->optimum_uA);
 	return 0;
 }
 
@@ -473,7 +481,7 @@
 	u32 prop_val;
 
 	snprintf(prop_name, CODEC_DT_MAX_PROP_SIZE,
-			"qcom,cdc-micbias-ldoh-v");
+		 "qcom,cdc-micbias-ldoh-v");
 	ret = of_property_read_u32(dev->of_node, prop_name, &prop_val);
 	if (ret) {
 		dev_err(dev, "Looking up %s property in node %s failed",
@@ -483,7 +491,7 @@
 	micbias->ldoh_v = (u8)prop_val;
 
 	snprintf(prop_name, CODEC_DT_MAX_PROP_SIZE,
-			"qcom,cdc-micbias-cfilt1-mv");
+		 "qcom,cdc-micbias-cfilt1-mv");
 	ret = of_property_read_u32(dev->of_node, prop_name,
 				   &micbias->cfilt1_mv);
 	if (ret) {
@@ -493,7 +501,7 @@
 	}
 
 	snprintf(prop_name, CODEC_DT_MAX_PROP_SIZE,
-			"qcom,cdc-micbias1-cfilt-sel");
+		 "qcom,cdc-micbias1-cfilt-sel");
 	ret = of_property_read_u32(dev->of_node, prop_name, &prop_val);
 	if (ret) {
 		dev_err(dev, "Looking up %s property in node %s failed",
@@ -508,7 +516,7 @@
 	     MICBIAS_EXT_BYP_CAP : MICBIAS_NO_EXT_BYP_CAP);
 
 	dev_dbg(dev, "ldoh_v  %u cfilt1_mv %u\n",
-			(u32)micbias->ldoh_v, (u32)micbias->cfilt1_mv);
+		(u32)micbias->ldoh_v, (u32)micbias->cfilt1_mv);
 	dev_dbg(dev, "bias1_cfilt_sel %u\n", (u32)micbias->bias1_cfilt_sel);
 	dev_dbg(dev, "bias1_ext_cap %d\n", micbias->bias1_cap_mode);
 
@@ -533,13 +541,14 @@
 		num_of_supplies = ARRAY_SIZE(msm8x10_wcd_supplies);
 	} else {
 		dev_err(dev, "%s unsupported device %s\n",
-				__func__, dev_name(dev));
+			__func__, dev_name(dev));
 		goto err;
 	}
 
 	if (num_of_supplies > ARRAY_SIZE(pdata->regulator)) {
 		dev_err(dev, "%s: Num of supplies %u > max supported %u\n",
-		      __func__, num_of_supplies, ARRAY_SIZE(pdata->regulator));
+			__func__, num_of_supplies,
+			ARRAY_SIZE(pdata->regulator));
 
 		goto err;
 	}
@@ -574,8 +583,8 @@
 		struct snd_kcontrol *kcontrol, int event)
 {
 	struct snd_soc_codec *codec = w->codec;
+	dev_dbg(codec->dev, "%s: event = %d\n", __func__, event);
 
-	pr_debug("%s %d\n", __func__, event);
 	switch (event) {
 	case SND_SOC_DAPM_POST_PMU:
 		/* Enable charge pump clock*/
@@ -623,13 +632,11 @@
 	} else if (ear_pa_gain == 0x04) {
 		ucontrol->value.integer.value[0] = 1;
 	} else  {
-		pr_err("%s: ERROR: Unsupported Ear Gain = 0x%x\n",
-				__func__, ear_pa_gain);
+		dev_err(codec->dev, "%s: ERROR: Unsupported Ear Gain = 0x%x\n",
+			__func__, ear_pa_gain);
 		return -EINVAL;
 	}
-
-	pr_debug("%s: ear_pa_gain = 0x%x\n", __func__, ear_pa_gain);
-
+	dev_dbg(codec->dev, "%s: ear_pa_gain = 0x%x\n", __func__, ear_pa_gain);
 	return 0;
 }
 
@@ -639,8 +646,8 @@
 	u8 ear_pa_gain;
 	struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
 
-	pr_debug("%s: ucontrol->value.integer.value[0]  = %ld\n",
-		 __func__, ucontrol->value.integer.value[0]);
+	dev_dbg(codec->dev, "%s: ucontrol->value.integer.value[0] = %ld\n",
+		__func__, ucontrol->value.integer.value[0]);
 
 	switch (ucontrol->value.integer.value[0]) {
 	case 0:
@@ -673,7 +680,7 @@
 			    (MSM8X10_WCD_A_CDC_IIR1_CTL + 64 * iir_idx)) &
 		(1 << band_idx);
 
-	pr_debug("%s: IIR #%d band #%d enable %d\n", __func__,
+	dev_dbg(codec->dev, "%s: IIR #%d band #%d enable %d\n", __func__,
 		iir_idx, band_idx,
 		(uint32_t)ucontrol->value.integer.value[0]);
 	return 0;
@@ -692,15 +699,15 @@
 
 	/* Mask first 5 bits, 6-8 are reserved */
 	snd_soc_update_bits(codec, (MSM8X10_WCD_A_CDC_IIR1_CTL + 64 * iir_idx),
-		(1 << band_idx), (value << band_idx));
+			    (1 << band_idx), (value << band_idx));
 
-	pr_debug("%s: IIR #%d band #%d enable %d\n", __func__,
+	dev_dbg(codec->dev, "%s: IIR #%d band #%d enable %d\n", __func__,
 		iir_idx, band_idx, value);
 	return 0;
 }
 static uint32_t get_iir_band_coeff(struct snd_soc_codec *codec,
-				int iir_idx, int band_idx,
-				int coeff_idx)
+				   int iir_idx, int band_idx,
+				   int coeff_idx)
 {
 	/* Address does not automatically update if reading */
 	snd_soc_write(codec,
@@ -734,7 +741,7 @@
 	ucontrol->value.integer.value[4] =
 		get_iir_band_coeff(codec, iir_idx, band_idx, 4);
 
-	pr_debug("%s: IIR #%d band #%d b0 = 0x%x\n"
+	dev_dbg(codec->dev, "%s: IIR #%d band #%d b0 = 0x%x\n"
 		"%s: IIR #%d band #%d b1 = 0x%x\n"
 		"%s: IIR #%d band #%d b2 = 0x%x\n"
 		"%s: IIR #%d band #%d a1 = 0x%x\n"
@@ -780,17 +787,17 @@
 					kcontrol->private_value)->shift;
 
 	set_iir_band_coeff(codec, iir_idx, band_idx, 0,
-				ucontrol->value.integer.value[0]);
+			   ucontrol->value.integer.value[0]);
 	set_iir_band_coeff(codec, iir_idx, band_idx, 1,
-				ucontrol->value.integer.value[1]);
+			   ucontrol->value.integer.value[1]);
 	set_iir_band_coeff(codec, iir_idx, band_idx, 2,
-				ucontrol->value.integer.value[2]);
+			   ucontrol->value.integer.value[2]);
 	set_iir_band_coeff(codec, iir_idx, band_idx, 3,
-				ucontrol->value.integer.value[3]);
+			   ucontrol->value.integer.value[3]);
 	set_iir_band_coeff(codec, iir_idx, band_idx, 4,
-				ucontrol->value.integer.value[4]);
+			   ucontrol->value.integer.value[4]);
 
-	pr_debug("%s: IIR #%d band #%d b0 = 0x%x\n"
+	dev_dbg(codec->dev, "%s: IIR #%d band #%d b0 = 0x%x\n"
 		"%s: IIR #%d band #%d b1 = 0x%x\n"
 		"%s: IIR #%d band #%d b2 = 0x%x\n"
 		"%s: IIR #%d band #%d a1 = 0x%x\n"
@@ -970,7 +977,6 @@
 	"ZERO", "ADC1", "ADC2", "DMIC1", "DMIC2"
 };
 
-
 static const char * const anc_mux_text[] = {
 	"ZERO", "ADC1", "ADC2", "ADC3", "ADC4", "ADC5", "ADC6", "ADC_MB",
 		"RSVD_1", "DMIC1", "DMIC2", "DMIC3", "DMIC4", "DMIC5", "DMIC6"
@@ -1076,14 +1082,16 @@
 	dec_name = strsep(&widget_name, " ");
 	widget_name = temp;
 	if (!dec_name) {
-		pr_err("%s: Invalid decimator = %s\n", __func__, w->name);
+		dev_err(codec->dev, "%s: Invalid decimator = %s\n",
+			__func__, w->name);
 		ret =  -EINVAL;
 		goto out;
 	}
 
 	ret = kstrtouint(strpbrk(dec_name, "12"), 10, &decimator);
 	if (ret < 0) {
-		pr_err("%s: Invalid decimator = %s\n", __func__, dec_name);
+		dev_err(codec->dev, "%s: Invalid decimator = %s\n",
+			__func__, dec_name);
 		ret =  -EINVAL;
 		goto out;
 	}
@@ -1100,7 +1108,8 @@
 			adc_dmic_sel = 0x0;
 		break;
 	default:
-		pr_err("%s: Invalid Decimator = %u\n", __func__, decimator);
+		dev_err(codec->dev, "%s: Invalid Decimator = %u\n",
+			__func__, decimator);
 		ret = -EINVAL;
 		goto out;
 	}
@@ -1204,18 +1213,18 @@
 static void msm8x10_wcd_codec_enable_adc_block(struct snd_soc_codec *codec,
 					 int enable)
 {
-	struct msm8x10_wcd_priv *taiko = snd_soc_codec_get_drvdata(codec);
+	struct msm8x10_wcd_priv *wcd8x10 = snd_soc_codec_get_drvdata(codec);
 
-	pr_debug("%s %d\n", __func__, enable);
+	dev_dbg(codec->dev, "%s %d\n", __func__, enable);
 
 	if (enable) {
-		taiko->adc_count++;
+		wcd8x10->adc_count++;
 		snd_soc_update_bits(codec,
 				    MSM8X10_WCD_A_CDC_ANA_CLK_CTL,
 				    0x20, 0x20);
 	} else {
-		taiko->adc_count--;
-		if (!taiko->adc_count)
+		wcd8x10->adc_count--;
+		if (!wcd8x10->adc_count)
 			snd_soc_update_bits(codec,
 					    MSM8X10_WCD_A_CDC_ANA_CLK_CTL,
 					    0x20, 0x0);
@@ -1229,7 +1238,7 @@
 	u16 adc_reg;
 	u8 init_bit_shift;
 
-	pr_debug("%s %d\n", __func__, event);
+	dev_dbg(codec->dev, "%s %d\n", __func__, event);
 	adc_reg = MSM8X10_WCD_A_TX_1_2_TEST_CTL;
 
 	if (w->reg == MSM8X10_WCD_A_TX_1_EN)
@@ -1237,7 +1246,8 @@
 	else if (adc_reg == MSM8X10_WCD_A_TX_2_EN)
 		init_bit_shift = 6;
 	else {
-		pr_err("%s: Error, invalid adc register\n", __func__);
+		dev_err(codec->dev, "%s: Error, invalid adc register\n",
+			__func__);
 		return -EINVAL;
 	}
 
@@ -1263,14 +1273,15 @@
 	struct snd_soc_codec *codec = w->codec;
 	u16 lineout_gain_reg;
 
-	pr_debug("%s %d %s\n", __func__, event, w->name);
+	dev_dbg(codec->dev, "%s %d %s\n", __func__, event, w->name);
 
 	switch (w->shift) {
 	case 0:
 		lineout_gain_reg = MSM8X10_WCD_A_RX_LINE_1_GAIN;
 		break;
 	default:
-		pr_err("%s: Error, incorrect lineout register value\n",
+		dev_err(codec->dev,
+			"%s: Error, incorrect lineout register value\n",
 			__func__);
 		return -EINVAL;
 	}
@@ -1280,8 +1291,8 @@
 		snd_soc_update_bits(codec, lineout_gain_reg, 0x40, 0x40);
 		break;
 	case SND_SOC_DAPM_POST_PMU:
-		pr_debug("%s: sleeping 16 ms after %s PA turn on\n",
-				__func__, w->name);
+		dev_dbg(codec->dev, "%s: sleeping 16 ms after %s PA turn on\n",
+			__func__, w->name);
 		usleep_range(16000, 16100);
 		break;
 	case SND_SOC_DAPM_POST_PMD:
@@ -1294,7 +1305,7 @@
 static int msm8x10_wcd_codec_enable_spk_pa(struct snd_soc_dapm_widget *w,
 				     struct snd_kcontrol *kcontrol, int event)
 {
-	pr_debug("%s %d %s\n", __func__, event, w->name);
+	dev_dbg(w->codec->dev, "%s %d %s\n", __func__, event, w->name);
 	return 0;
 }
 
@@ -1311,7 +1322,8 @@
 
 	ret = kstrtouint(strpbrk(w->name, "12"), 10, &dmic);
 	if (ret < 0) {
-		pr_err("%s: Invalid DMIC line on the codec\n", __func__);
+		dev_err(codec->dev,
+			"%s: Invalid DMIC line on the codec\n", __func__);
 		return -EINVAL;
 	}
 
@@ -1321,11 +1333,12 @@
 		dmic_clk_en = 0x01;
 		dmic_clk_cnt = &(msm8x10_wcd->dmic_1_2_clk_cnt);
 		dmic_clk_reg = MSM8X10_WCD_A_CDC_CLK_DMIC_B1_CTL;
-		pr_debug("%s() event %d DMIC%d dmic_1_2_clk_cnt %d\n",
+		dev_dbg(codec->dev,
+			"%s() event %d DMIC%d dmic_1_2_clk_cnt %d\n",
 			__func__, event,  dmic, *dmic_clk_cnt);
 		break;
 	default:
-		pr_err("%s: Invalid DMIC Selection\n", __func__);
+		dev_err(codec->dev, "%s: Invalid DMIC Selection\n", __func__);
 		return -EINVAL;
 	}
 
@@ -1360,7 +1373,7 @@
 	char *internal3_text = "Internal3";
 	enum wcd9xxx_notify_event e_post_off, e_pre_on, e_post_on;
 
-	pr_debug("%s %d\n", __func__, event);
+	dev_dbg(codec->dev, "%s %d\n", __func__, event);
 	switch (w->reg) {
 	case MSM8X10_WCD_A_MICB_1_CTL:
 		micb_int_reg = MSM8X10_WCD_A_MICB_1_INT_RBIAS;
@@ -1371,7 +1384,8 @@
 		e_post_off = WCD9XXX_EVENT_POST_MICBIAS_1_OFF;
 		break;
 	default:
-		pr_err("%s: Error, invalid micbias register\n", __func__);
+		dev_err(codec->dev,
+			"%s: Error, invalid micbias register\n", __func__);
 		return -EINVAL;
 	}
 
@@ -1432,7 +1446,7 @@
 	u8 dec_hpf_cut_of_freq;
 	int offset;
 
-	pr_debug("%s %d\n", __func__, event);
+	dev_dbg(codec->dev, "%s %d\n", __func__, event);
 
 	widget_name = kstrndup(w->name, 15, GFP_KERNEL);
 	if (!widget_name)
@@ -1442,26 +1456,29 @@
 	dec_name = strsep(&widget_name, " ");
 	widget_name = temp;
 	if (!dec_name) {
-		pr_err("%s: Invalid decimator = %s\n", __func__, w->name);
+		dev_err(codec->dev,
+			"%s: Invalid decimator = %s\n", __func__, w->name);
 		ret = -EINVAL;
 		goto out;
 	}
 
 	ret = kstrtouint(strpbrk(dec_name, "12"), 10, &decimator);
 	if (ret < 0) {
-		pr_err("%s: Invalid decimator = %s\n", __func__, dec_name);
+		dev_err(codec->dev,
+			"%s: Invalid decimator = %s\n", __func__, dec_name);
 		ret = -EINVAL;
 		goto out;
 	}
 
-	pr_debug("%s(): widget = %s dec_name = %s decimator = %u\n", __func__,
-			w->name, dec_name, decimator);
+	dev_dbg(codec->dev,
+		"%s(): widget = %s dec_name = %s decimator = %u\n", __func__,
+		w->name, dec_name, decimator);
 
 	if (w->reg == MSM8X10_WCD_A_CDC_CLK_TX_CLK_EN_B1_CTL) {
 		dec_reset_reg = MSM8X10_WCD_A_CDC_CLK_TX_RESET_B1_CTL;
 		offset = 0;
 	} else {
-		pr_err("%s: Error, incorrect dec\n", __func__);
+		dev_err(codec->dev, "%s: Error, incorrect dec\n", __func__);
 		ret = -EINVAL;
 		goto out;
 	}
@@ -1531,11 +1548,12 @@
 }
 
 static int msm8x10_wcd_codec_enable_interpolator(struct snd_soc_dapm_widget *w,
-	struct snd_kcontrol *kcontrol, int event)
+						 struct snd_kcontrol *kcontrol,
+						 int event)
 {
 	struct snd_soc_codec *codec = w->codec;
 
-	pr_debug("%s %d %s\n", __func__, event, w->name);
+	dev_dbg(codec->dev, "%s %d %s\n", __func__, event, w->name);
 
 	switch (event) {
 	case SND_SOC_DAPM_PRE_PMU:
@@ -1565,7 +1583,7 @@
 	struct snd_soc_codec *codec = w->codec;
 	struct msm8x10_wcd_priv *msm8x10_wcd = snd_soc_codec_get_drvdata(codec);
 
-	pr_debug("%s %d\n", __func__, event);
+	dev_dbg(codec->dev, "%s %d\n", __func__, event);
 
 	switch (event) {
 	case SND_SOC_DAPM_PRE_PMU:
@@ -1583,7 +1601,7 @@
 {
 	struct snd_soc_codec *codec = w->codec;
 
-	pr_debug("%s %s %d\n", __func__, w->name, event);
+	dev_dbg(codec->dev, "%s %s %d\n", __func__, w->name, event);
 
 	switch (event) {
 	case SND_SOC_DAPM_PRE_PMU:
@@ -1603,7 +1621,7 @@
 	struct msm8x10_wcd_priv *msm8x10_wcd = snd_soc_codec_get_drvdata(codec);
 	enum wcd9xxx_notify_event e_pre_on, e_post_off;
 
-	pr_debug("%s: %s event = %d\n", __func__, w->name, event);
+	dev_dbg(codec->dev, "%s: %s event = %d\n", __func__, w->name, event);
 	if (w->shift == 5) {
 		e_pre_on = WCD9XXX_EVENT_PRE_HPHR_PA_ON;
 		e_post_off = WCD9XXX_EVENT_POST_HPHR_PA_OFF;
@@ -1611,7 +1629,8 @@
 		e_pre_on = WCD9XXX_EVENT_PRE_HPHL_PA_ON;
 		e_post_off = WCD9XXX_EVENT_POST_HPHL_PA_OFF;
 	} else {
-		pr_err("%s: Invalid w->shift %d\n", __func__, w->shift);
+		dev_err(codec->dev,
+			"%s: Invalid w->shift %d\n", __func__, w->shift);
 		return -EINVAL;
 	}
 
@@ -1635,8 +1654,9 @@
 		 * would have been locked while snd_soc_jack_report also
 		 * attempts to acquire same lock.
 		 */
-		pr_debug("%s: sleep 10 ms after %s PA disable.\n", __func__,
-			 w->name);
+		dev_dbg(codec->dev,
+			"%s: sleep 10 ms after %s PA disable.\n", __func__,
+			w->name);
 		usleep_range(10000, 10100);
 		break;
 	}
@@ -1648,7 +1668,7 @@
 {
 	struct snd_soc_codec *codec = w->codec;
 
-	pr_debug("%s %s %d\n", __func__, w->name, event);
+	dev_dbg(codec->dev, "%s %s %d\n", __func__, w->name, event);
 
 	switch (event) {
 	case SND_SOC_DAPM_PRE_PMU:
@@ -1665,7 +1685,7 @@
 static int msm8x10_wcd_spk_dac_event(struct snd_soc_dapm_widget *w,
 	struct snd_kcontrol *kcontrol, int event)
 {
-	pr_debug("%s %s %d\n", __func__, w->name, event);
+	dev_dbg(w->codec->dev, "%s %s %d\n", __func__, w->name, event);
 	return 0;
 }
 
@@ -1812,14 +1832,14 @@
 	{"MIC BIAS1 External", NULL, "LDO_H"},
 };
 
-
 static int msm8x10_wcd_startup(struct snd_pcm_substream *substream,
 		struct snd_soc_dai *dai)
 {
 	struct msm8x10_wcd *msm8x10_wcd_core =
 		dev_get_drvdata(dai->codec->dev);
-	pr_debug("%s(): substream = %s  stream = %d\n" , __func__,
-		 substream->name, substream->stream);
+	dev_dbg(dai->codec->dev, "%s(): substream = %s  stream = %d\n",
+		__func__,
+		substream->name, substream->stream);
 	if ((msm8x10_wcd_core != NULL) &&
 	    (msm8x10_wcd_core->dev != NULL))
 		pm_runtime_get_sync(msm8x10_wcd_core->dev);
@@ -1832,8 +1852,9 @@
 {
 	struct msm8x10_wcd *msm8x10_wcd_core =
 		dev_get_drvdata(dai->codec->dev);
-	pr_debug("%s(): substream = %s  stream = %d\n" , __func__,
-		 substream->name, substream->stream);
+	dev_dbg(dai->codec->dev,
+		"%s(): substream = %s  stream = %d\n" , __func__,
+		substream->name, substream->stream);
 	if ((msm8x10_wcd_core != NULL) &&
 	    (msm8x10_wcd_core->dev != NULL)) {
 		pm_runtime_mark_last_busy(msm8x10_wcd_core->dev);
@@ -1846,9 +1867,9 @@
 {
 	struct msm8x10_wcd_priv *msm8x10_wcd = snd_soc_codec_get_drvdata(codec);
 
-	pr_debug("%s: mclk_enable = %u, dapm = %d\n", __func__, mclk_enable,
-		 dapm);
-
+	dev_dbg(codec->dev,
+		"%s: mclk_enable = %u, dapm = %d\n", __func__,
+		mclk_enable, dapm);
 	WCD9XXX_BCL_LOCK(&msm8x10_wcd->resmgr);
 	if (mclk_enable) {
 		wcd9xxx_resmgr_get_bandgap(&msm8x10_wcd->resmgr,
@@ -1870,13 +1891,13 @@
 static int msm8x10_wcd_set_dai_sysclk(struct snd_soc_dai *dai,
 		int clk_id, unsigned int freq, int dir)
 {
-	pr_debug("%s\n", __func__);
+	dev_dbg(dai->codec->dev, "%s\n", __func__);
 	return 0;
 }
 
 static int msm8x10_wcd_set_dai_fmt(struct snd_soc_dai *dai, unsigned int fmt)
 {
-	pr_debug("%s\n", __func__);
+	dev_dbg(dai->codec->dev, "%s\n", __func__);
 	return 0;
 }
 
@@ -1885,7 +1906,7 @@
 				unsigned int rx_num, unsigned int *rx_slot)
 
 {
-	pr_debug("%s\n", __func__);
+	dev_dbg(dai->codec->dev, "%s\n", __func__);
 	return 0;
 }
 
@@ -1894,7 +1915,7 @@
 				 unsigned int *rx_num, unsigned int *rx_slot)
 
 {
-	pr_debug("%s\n", __func__);
+	dev_dbg(dai->codec->dev, "%s\n", __func__);
 	return 0;
 }
 
@@ -1917,7 +1938,8 @@
 	u8 tx_fs_rate, rx_fs_rate;
 	int ret;
 
-	pr_debug("%s: dai_name = %s DAI-ID %x rate %d num_ch %d\n", __func__,
+	dev_dbg(dai->codec->dev,
+		"%s: dai_name = %s DAI-ID %x rate %d num_ch %d\n", __func__,
 		 dai->name, dai->id, params_rate(params),
 		 params_channels(params));
 
@@ -1947,7 +1969,8 @@
 		rx_fs_rate = 0xA0;
 		break;
 	default:
-		pr_err("%s: Invalid sampling rate %d\n", __func__,
+		dev_err(dai->codec->dev,
+			"%s: Invalid sampling rate %d\n", __func__,
 			params_rate(params));
 		return -EINVAL;
 	}
@@ -1957,7 +1980,8 @@
 		ret = msm8x10_wcd_set_decimator_rate(dai, tx_fs_rate,
 					       params_rate(params));
 		if (ret < 0) {
-			pr_err("%s: set decimator rate failed %d\n", __func__,
+			dev_err(dai->codec->dev,
+				"%s: set decimator rate failed %d\n", __func__,
 				ret);
 			return ret;
 		}
@@ -1966,13 +1990,15 @@
 		ret = msm8x10_wcd_set_interpolator_rate(dai, rx_fs_rate,
 						  params_rate(params));
 		if (ret < 0) {
-			pr_err("%s: set decimator rate failed %d\n", __func__,
+			dev_err(dai->codec->dev,
+				"%s: set decimator rate failed %d\n", __func__,
 				ret);
 			return ret;
 		}
 		break;
 	default:
-		pr_err("%s: Invalid stream type %d\n", __func__,
+		dev_err(dai->codec->dev,
+			"%s: Invalid stream type %d\n", __func__,
 			substream->stream);
 		return -EINVAL;
 	}
@@ -2026,13 +2052,15 @@
 {
 	switch (event) {
 	case SND_SOC_DAPM_POST_PMU:
-		pr_debug("%s: Sleeping 20ms after enabling EAR PA\n",
-				 __func__);
+		dev_dbg(w->codec->dev,
+			"%s: Sleeping 20ms after enabling EAR PA\n",
+			__func__);
 		msleep(20);
 		break;
 	case SND_SOC_DAPM_POST_PMD:
-		pr_debug("%s: Sleeping 20ms after disabling EAR PA\n",
-				 __func__);
+		dev_dbg(w->codec->dev,
+			"%s: Sleeping 20ms after disabling EAR PA\n",
+			__func__);
 		msleep(20);
 		break;
 	}
@@ -2310,11 +2338,12 @@
 
 static int msm8x10_wcd_codec_probe(struct snd_soc_codec *codec)
 {
-	msm8x10_wcd_codec_init_reg(codec);
+	dev_dbg(codec->dev, "%s()\n", __func__);
 
+	codec->control_data = dev_get_drvdata(codec->dev);
+	msm8x10_wcd_codec_init_reg(codec);
 	msm8x10_wcd_update_reg_defaults(codec);
 
-	dev_dbg(codec->dev, "%s()\n", __func__);
 
 	return 0;
 }
@@ -2324,6 +2353,18 @@
 	return 0;
 }
 
+static int msm8x10_wcd_device_init(struct msm8x10_wcd *msm8x10)
+{
+
+	mutex_init(&msm8x10->io_lock);
+	mutex_init(&msm8x10->xfer_lock);
+	mutex_init(&msm8x10->pm_lock);
+	msm8x10->wlock_holders = 0;
+
+	return 0;
+}
+
+
 static struct snd_soc_codec_driver soc_codec_dev_msm8x10_wcd = {
 	.probe	= msm8x10_wcd_codec_probe,
 	.remove	= msm8x10_wcd_codec_remove,
@@ -2349,8 +2390,21 @@
 static int __devinit msm8x10_wcd_i2c_probe(struct i2c_client *client,
 			const struct i2c_device_id *id)
 {
-	int ret;
+	int ret = 0;
+	struct msm8x10_wcd *msm8x10 = NULL;
 	struct msm8x10_wcd_pdata *pdata;
+	static int device_id;
+	struct device *dev;
+
+	dev_dbg(&client->dev, "%s:slave addr = 0x%x device_id = %d\n",
+		__func__, client->addr, device_id);
+
+	if (device_id > 0) {
+		msm8x10_wcd_modules[device_id++].client = client;
+		return ret;
+	}
+
+	dev = &client->dev;
 	if (client->dev.of_node) {
 		dev_dbg(&client->dev, "%s:Platform data from device tree\n",
 			__func__);
@@ -2362,16 +2416,50 @@
 		pdata = client->dev.platform_data;
 	}
 
-	ret = snd_soc_register_codec(&client->dev,
-		&soc_codec_dev_msm8x10_wcd,
-		msm8x10_wcd_i2s_dai, ARRAY_SIZE(msm8x10_wcd_i2s_dai));
-	dev_dbg(&client->dev, "%s:ret = 0x%x\n", __func__, ret);
+	msm8x10 = kzalloc(sizeof(struct msm8x10_wcd), GFP_KERNEL);
+	if (msm8x10 == NULL) {
+		dev_err(&client->dev,
+			"%s: error, allocation failed\n", __func__);
+		ret = -ENOMEM;
+		goto fail;
+	}
 
+	msm8x10->dev = &client->dev;
+	msm8x10_wcd_modules[device_id++].client = client;
+	msm8x10->read_dev = msm8x10_wcd_reg_read;
+	msm8x10->write_dev = msm8x10_wcd_reg_write;
+	ret = msm8x10_wcd_device_init(msm8x10);
+	if (ret) {
+		dev_err(&client->dev,
+			"%s:msm8x10_wcd_device_init failed with error %d\n",
+			__func__, ret);
+		goto fail;
+	}
+	dev_set_drvdata(&client->dev, msm8x10);
+	ret = snd_soc_register_codec(&client->dev, &soc_codec_dev_msm8x10_wcd,
+				     msm8x10_wcd_i2s_dai,
+				     ARRAY_SIZE(msm8x10_wcd_i2s_dai));
+	if (ret)
+		dev_err(&client->dev,
+			"%s:snd_soc_register_codec failed with error %d\n",
+			__func__, ret);
+fail:
 	return ret;
 }
 
+static void msm8x10_wcd_device_exit(struct msm8x10_wcd *msm8x10)
+{
+	mutex_destroy(&msm8x10->pm_lock);
+	mutex_destroy(&msm8x10->io_lock);
+	mutex_destroy(&msm8x10->xfer_lock);
+	kfree(msm8x10);
+}
+
 static int __devexit msm8x10_wcd_i2c_remove(struct i2c_client *client)
 {
+	struct msm8x10_wcd *msm8x10 = dev_get_drvdata(&client->dev);
+
+	msm8x10_wcd_device_exit(msm8x10);
 	return 0;
 }
 
@@ -2407,8 +2495,8 @@
 	pr_debug("%s:\n", __func__);
 	ret = i2c_add_driver(&msm8x10_wcd_i2c_driver);
 	if (ret != 0)
-		pr_err("%s: Failed to add msm8x10 wcd I2C driver - error code %d\n",
-			   __func__, ret);
+		pr_err("%s: Failed to add msm8x10 wcd I2C driver - error %d\n",
+		       __func__, ret);
 	return ret;
 }
 
diff --git a/sound/soc/codecs/msm8x10-wcd.h b/sound/soc/codecs/msm8x10-wcd.h
index 365d526..44e8a6d 100644
--- a/sound/soc/codecs/msm8x10-wcd.h
+++ b/sound/soc/codecs/msm8x10-wcd.h
@@ -196,6 +196,10 @@
 	u8 version;
 
 	int reset_gpio;
+	int (*read_dev)(struct msm8x10_wcd *msm8x10,
+			unsigned short reg, unsigned int *val);
+	int (*write_dev)(struct msm8x10_wcd *msm8x10,
+			 unsigned short reg, unsigned int val);
 
 	u32 num_of_supplies;
 	struct regulator_bulk_data *supplies;
diff --git a/sound/soc/codecs/wcd9306.c b/sound/soc/codecs/wcd9306.c
index 0b26a56..25d3f56 100644
--- a/sound/soc/codecs/wcd9306.c
+++ b/sound/soc/codecs/wcd9306.c
@@ -487,16 +487,16 @@
 	SOC_ENUM_SINGLE(TAPAN_A_CDC_TX4_MUX_CTL, 4, 3, cf_text);
 
 static const struct soc_enum cf_rxmix1_enum =
-	SOC_ENUM_SINGLE(TAPAN_A_CDC_RX1_B4_CTL, 1, 3, cf_text);
+	SOC_ENUM_SINGLE(TAPAN_A_CDC_RX1_B4_CTL, 0, 3, cf_text);
 
 static const struct soc_enum cf_rxmix2_enum =
-	SOC_ENUM_SINGLE(TAPAN_A_CDC_RX2_B4_CTL, 1, 3, cf_text);
+	SOC_ENUM_SINGLE(TAPAN_A_CDC_RX2_B4_CTL, 0, 3, cf_text);
 
 static const struct soc_enum cf_rxmix3_enum =
-	SOC_ENUM_SINGLE(TAPAN_A_CDC_RX3_B4_CTL, 1, 3, cf_text);
+	SOC_ENUM_SINGLE(TAPAN_A_CDC_RX3_B4_CTL, 0, 3, cf_text);
 
 static const struct soc_enum cf_rxmix4_enum =
-	SOC_ENUM_SINGLE(TAPAN_A_CDC_RX4_B4_CTL, 1, 3, cf_text);
+	SOC_ENUM_SINGLE(TAPAN_A_CDC_RX4_B4_CTL, 0, 3, cf_text);
 
 static const struct snd_kcontrol_new tapan_snd_controls[] = {
 
diff --git a/sound/soc/codecs/wcd9320.c b/sound/soc/codecs/wcd9320.c
index f48dbf1..b3d4901 100644
--- a/sound/soc/codecs/wcd9320.c
+++ b/sound/soc/codecs/wcd9320.c
@@ -829,25 +829,25 @@
 	SOC_ENUM_SINGLE(TAIKO_A_CDC_TX10_MUX_CTL, 4, 3, cf_text);
 
 static const struct soc_enum cf_rxmix1_enum =
-	SOC_ENUM_SINGLE(TAIKO_A_CDC_RX1_B4_CTL, 1, 3, cf_text);
+	SOC_ENUM_SINGLE(TAIKO_A_CDC_RX1_B4_CTL, 0, 3, cf_text);
 
 static const struct soc_enum cf_rxmix2_enum =
-	SOC_ENUM_SINGLE(TAIKO_A_CDC_RX2_B4_CTL, 1, 3, cf_text);
+	SOC_ENUM_SINGLE(TAIKO_A_CDC_RX2_B4_CTL, 0, 3, cf_text);
 
 static const struct soc_enum cf_rxmix3_enum =
-	SOC_ENUM_SINGLE(TAIKO_A_CDC_RX3_B4_CTL, 1, 3, cf_text);
+	SOC_ENUM_SINGLE(TAIKO_A_CDC_RX3_B4_CTL, 0, 3, cf_text);
 
 static const struct soc_enum cf_rxmix4_enum =
-	SOC_ENUM_SINGLE(TAIKO_A_CDC_RX4_B4_CTL, 1, 3, cf_text);
+	SOC_ENUM_SINGLE(TAIKO_A_CDC_RX4_B4_CTL, 0, 3, cf_text);
 
 static const struct soc_enum cf_rxmix5_enum =
-	SOC_ENUM_SINGLE(TAIKO_A_CDC_RX5_B4_CTL, 1, 3, cf_text)
+	SOC_ENUM_SINGLE(TAIKO_A_CDC_RX5_B4_CTL, 0, 3, cf_text)
 ;
 static const struct soc_enum cf_rxmix6_enum =
-	SOC_ENUM_SINGLE(TAIKO_A_CDC_RX6_B4_CTL, 1, 3, cf_text);
+	SOC_ENUM_SINGLE(TAIKO_A_CDC_RX6_B4_CTL, 0, 3, cf_text);
 
 static const struct soc_enum cf_rxmix7_enum =
-	SOC_ENUM_SINGLE(TAIKO_A_CDC_RX7_B4_CTL, 1, 3, cf_text);
+	SOC_ENUM_SINGLE(TAIKO_A_CDC_RX7_B4_CTL, 0, 3, cf_text);
 
 static const char * const class_h_dsm_text[] = {
 	"ZERO", "DSM_HPHL_RX1", "DSM_SPKR_RX7"
@@ -932,12 +932,6 @@
 	SOC_SINGLE_TLV("ADC5 Volume", TAIKO_A_TX_5_6_EN, 5, 3, 0, analog_gain),
 	SOC_SINGLE_TLV("ADC6 Volume", TAIKO_A_TX_5_6_EN, 1, 3, 0, analog_gain),
 
-
-	SOC_SINGLE("MICBIAS1 CAPLESS Switch", TAIKO_A_MICB_1_CTL, 4, 1, 1),
-	SOC_SINGLE("MICBIAS2 CAPLESS Switch", TAIKO_A_MICB_2_CTL, 4, 1, 1),
-	SOC_SINGLE("MICBIAS3 CAPLESS Switch", TAIKO_A_MICB_3_CTL, 4, 1, 1),
-	SOC_SINGLE("MICBIAS4 CAPLESS Switch", TAIKO_A_MICB_4_CTL, 4, 1, 1),
-
 	SOC_SINGLE_EXT("ANC Slot", SND_SOC_NOPM, 0, 0, 100, taiko_get_anc_slot,
 		taiko_put_anc_slot),
 	SOC_ENUM("TX1 HPF cut off", cf_dec1_enum),
diff --git a/sound/soc/msm/mdm9625.c b/sound/soc/msm/mdm9625.c
index 8eac69e..2bef1b7 100644
--- a/sound/soc/msm/mdm9625.c
+++ b/sound/soc/msm/mdm9625.c
@@ -27,6 +27,7 @@
 #include <mach/socinfo.h>
 #include <qdsp6v2/msm-pcm-routing-v2.h>
 #include "../codecs/wcd9320.h"
+#include <linux/io.h>
 
 /* Spk control */
 #define MDM9625_SPK_ON 1
@@ -38,9 +39,16 @@
 #define MDM_MCLK_CLK_12P288MHZ 12288000
 #define MDM_MCLK_CLK_9P6HZ 9600000
 #define MDM_IBIT_CLK_DIV_1P56MHZ 7
-#define MDM_MI2S_PRIM_INTF 0
-#define MDM_MI2S_SEC_INTF  1
+#define MDM_MI2S_AUXPCM_PRIM_INTF 0
+#define MDM_MI2S_AUXPCM_SEC_INTF  1
 
+#define LPAIF_OFFSET 0xFE000000
+#define LPAIF_PRI_MODE_MUXSEL (LPAIF_OFFSET + 0x2B000)
+#define LPAIF_SEC_MODE_MUXSEL (LPAIF_OFFSET + 0x2C000)
+
+#define I2S_SEL 0
+#define I2S_PCM_SEL 1
+#define I2S_PCM_SEL_OFFSET 1
 
 /* Machine driver Name*/
 #define MDM9625_MACHINE_DRV_NAME "mdm9625-asoc-taiko"
@@ -77,6 +85,8 @@
 #define GPIO_NAME_INDEX 0
 #define DT_PARSE_INDEX  1
 
+static int mdm9625_auxpcm_rate = 8000;
+static void __iomem *lpaif_pri_muxsel_virt_addr;
 
 static char *mdm_i2s_gpio_name[][2] = {
 	 {"PRIM_MI2S_WS",   "prim-i2s-gpio-ws"},
@@ -93,6 +103,7 @@
 static int mdm9625_mi2s_rx_ch = 1;
 static int mdm9625_mi2s_tx_ch = 1;
 static int msm_spk_control;
+static atomic_t aux_ref_count;
 static atomic_t mi2s_ref_count;
 
 static int mdm9625_enable_codec_ext_clk(struct snd_soc_codec *codec,
@@ -117,7 +128,8 @@
 #define WCD9XXX_MBHC_DEF_BUTTONS 8
 #define WCD9XXX_MBHC_DEF_RLOADS 5
 
-static int mdm9625_set_mi2s_gpio(struct snd_pcm_substream *substream, u32 intf)
+static int mdm9625_set_gpio(struct snd_pcm_substream *substream,
+			    u32 intf)
 {
 	struct snd_soc_pcm_runtime *rtd = substream->private_data;
 	struct snd_soc_card *card = rtd->card;
@@ -134,8 +146,9 @@
 		goto err;
 	}
 
-	if (intf == MDM_MI2S_PRIM_INTF)
+	if (intf == MDM_MI2S_AUXPCM_PRIM_INTF) {
 		i2s_ctrl = pdata->pri_ctrl;
+	}
 	else {
 		pr_err("%s: Wrong I2S Interface\n", __func__);
 		rtn = -EINVAL;
@@ -186,8 +199,9 @@
 		rtn = -EINVAL;
 		goto err;
 	}
-	if (intf == MDM_MI2S_PRIM_INTF)
+	if (intf == MDM_MI2S_AUXPCM_PRIM_INTF) {
 		i2s_ctrl = pdata->pri_ctrl;
+	}
 	else {
 		pr_debug("%s: Wrong Interface\n", __func__);
 		rtn = -EINVAL;
@@ -264,7 +278,7 @@
 	struct snd_soc_pcm_runtime *rtd = substream->private_data;
 	int ret;
 	if (atomic_dec_return(&mi2s_ref_count) == 0) {
-		mdm9625_mi2s_free_gpios(substream, MDM_MI2S_PRIM_INTF);
+		mdm9625_mi2s_free_gpios(substream, MDM_MI2S_AUXPCM_PRIM_INTF);
 		ret = mdm9625_mi2s_clk_ctl(rtd, false);
 		if (ret < 0)
 			pr_err("%s:clock disable failed\n", __func__);
@@ -279,7 +293,17 @@
 	int ret = 0;
 
 	if (atomic_inc_return(&mi2s_ref_count) == 1) {
-		mdm9625_set_mi2s_gpio(substream, MDM_MI2S_PRIM_INTF);
+		if (lpaif_pri_muxsel_virt_addr != NULL)
+			iowrite32(I2S_SEL << I2S_PCM_SEL_OFFSET,
+				  lpaif_pri_muxsel_virt_addr);
+		else
+			pr_err("%s lpaif_pri_muxsel_virt_addr is NULL\n",
+				__func__);
+		ret = mdm9625_set_gpio(substream, MDM_MI2S_AUXPCM_PRIM_INTF);
+		if (ret < 0) {
+			pr_err("%s, GPIO setup failed\n", __func__);
+			return ret;
+		}
 		ret = mdm9625_mi2s_clk_ctl(rtd, true);
 		if (ret < 0) {
 			pr_err("set format for codec dai failed\n");
@@ -470,6 +494,83 @@
 	return 0;
 }
 
+static int mdm9625_auxpcm_startup(struct snd_pcm_substream *substream)
+{
+	struct snd_soc_pcm_runtime *rtd = substream->private_data;
+	int ret = 0;
+
+	if (atomic_inc_return(&aux_ref_count) == 1) {
+		if (lpaif_pri_muxsel_virt_addr != NULL)
+			iowrite32(I2S_PCM_SEL << I2S_PCM_SEL_OFFSET,
+				  lpaif_pri_muxsel_virt_addr);
+		else
+			pr_err("%s lpaif_pri_muxsel_virt_addr is NULL\n",
+				__func__);
+		ret = mdm9625_set_gpio(substream, MDM_MI2S_AUXPCM_PRIM_INTF);
+		if (ret < 0) {
+			pr_err("%s, GPIO setup failed\n", __func__);
+			return ret;
+		}
+		ret = mdm9625_mi2s_clk_ctl(rtd, true);
+		if (ret < 0) {
+			pr_err("%s: clock enable failed\n", __func__);
+			return ret;
+		}
+	}
+	return ret;
+}
+
+static void mdm9625_auxpcm_snd_shutdown(struct snd_pcm_substream *substream)
+{
+	struct snd_soc_pcm_runtime *rtd = substream->private_data;
+	int ret;
+
+	if (atomic_dec_return(&aux_ref_count) == 0) {
+		mdm9625_mi2s_free_gpios(substream, MDM_MI2S_AUXPCM_PRIM_INTF);
+		ret = mdm9625_mi2s_clk_ctl(rtd, false);
+		if (ret < 0)
+			pr_err("%s:clock disable failed\n", __func__);
+	}
+}
+
+static int mdm9625_auxpcm_rate_get(struct snd_kcontrol *kcontrol,
+				   struct snd_ctl_elem_value *ucontrol)
+{
+	ucontrol->value.integer.value[0] = mdm9625_auxpcm_rate;
+	return 0;
+}
+
+static int mdm9625_auxpcm_rate_put(struct snd_kcontrol *kcontrol,
+				   struct snd_ctl_elem_value *ucontrol)
+{
+	switch (ucontrol->value.integer.value[0]) {
+	case 0:
+		mdm9625_auxpcm_rate = 8000;
+		break;
+	case 1:
+		mdm9625_auxpcm_rate = 16000;
+		break;
+	default:
+		mdm9625_auxpcm_rate = 8000;
+		break;
+	}
+	return 0;
+}
+
+static int mdm9625_auxpcm_be_params_fixup(struct snd_soc_pcm_runtime *rtd,
+					  struct snd_pcm_hw_params *params)
+{
+	struct snd_interval *rate =
+		hw_param_interval(params, SNDRV_PCM_HW_PARAM_RATE);
+
+	struct snd_interval *channels =
+		hw_param_interval(params, SNDRV_PCM_HW_PARAM_CHANNELS);
+
+	rate->min = rate->max = mdm9625_auxpcm_rate;
+	channels->min = channels->max = 1;
+
+	return 0;
+}
 
 static const struct snd_soc_dapm_widget mdm9625_dapm_widgets[] = {
 
@@ -494,23 +595,28 @@
 static const char *const spk_function[] = {"Off", "On"};
 static const char *const mi2s_rx_ch_text[] = {"One", "Two"};
 static const char *const mi2s_tx_ch_text[] = {"One", "Two"};
+static const char *const auxpcm_rate_text[] = {"rate_8000", "rate_16000"};
 
 static const struct soc_enum mdm9625_enum[] = {
 	SOC_ENUM_SINGLE_EXT(2, spk_function),
 	SOC_ENUM_SINGLE_EXT(2, mi2s_rx_ch_text),
 	SOC_ENUM_SINGLE_EXT(2, mi2s_tx_ch_text),
+	SOC_ENUM_SINGLE_EXT(2, auxpcm_rate_text),
 };
 
 static const struct snd_kcontrol_new mdm_snd_controls[] = {
-	SOC_ENUM_EXT("Speaker Function", mdm9625_enum[0],
+	SOC_ENUM_EXT("Speaker Function",   mdm9625_enum[0],
 				 mdm9625_mi2s_get_spk,
 				 mdm9625_mi2s_set_spk),
-	SOC_ENUM_EXT("MI2S_RX Channels", mdm9625_enum[1],
+	SOC_ENUM_EXT("MI2S_RX Channels",   mdm9625_enum[1],
 				 mdm9625_mi2s_rx_ch_get,
 				 mdm9625_mi2s_rx_ch_put),
-	SOC_ENUM_EXT("MI2S_TX Channels", mdm9625_enum[2],
+	SOC_ENUM_EXT("MI2S_TX Channels",   mdm9625_enum[2],
 				 mdm9625_mi2s_tx_ch_get,
 				 mdm9625_mi2s_tx_ch_put),
+	SOC_ENUM_EXT("AUX PCM SampleRate", mdm9625_enum[3],
+				 mdm9625_auxpcm_rate_get,
+				 mdm9625_auxpcm_rate_put),
 };
 
 static int mdm9625_mi2s_audrx_init(struct snd_soc_pcm_runtime *rtd)
@@ -631,6 +737,11 @@
 	.shutdown = mdm9625_mi2s_snd_shutdown,
 };
 
+static struct snd_soc_ops mdm9625_auxpcm_be_ops = {
+	.startup = mdm9625_auxpcm_startup,
+	.shutdown = mdm9625_auxpcm_snd_shutdown,
+};
+
 /* Digital audio interface connects codec <---> CPU */
 static struct snd_soc_dai_link mdm9625_dai[] = {
 	/* FrontEnd DAI Links */
@@ -638,7 +749,7 @@
 		.name = "MDM9625 Media1",
 		.stream_name = "MultiMedia1",
 		.cpu_dai_name = "MultiMedia1",
-		.platform_name  = "msm-pcm-dsp",
+		.platform_name  = "msm-pcm-dsp.0",
 		.dynamic = 1,
 		.trigger = {SND_SOC_DPCM_TRIGGER_POST,
 			    SND_SOC_DPCM_TRIGGER_POST},
@@ -799,6 +910,32 @@
 		.no_pcm = 1,
 		.be_id = MSM_BACKEND_DAI_AFE_PCM_TX,
 	},
+	{
+		.name = LPASS_BE_AUXPCM_RX,
+		.stream_name = "AUX PCM Playback",
+		.cpu_dai_name = "msm-dai-q6.4106",
+		.platform_name = "msm-pcm-routing",
+		.codec_name = "msm-stub-codec.1",
+		.codec_dai_name = "msm-stub-rx",
+		.no_pcm = 1,
+		.be_id = MSM_BACKEND_DAI_AUXPCM_RX,
+		.be_hw_params_fixup = mdm9625_auxpcm_be_params_fixup,
+		.ops = &mdm9625_auxpcm_be_ops,
+		.ignore_pmdown_time = 1,
+		/* this dainlink has playback support */
+	},
+	{
+		.name = LPASS_BE_AUXPCM_TX,
+		.stream_name = "AUX PCM Capture",
+		.cpu_dai_name = "msm-dai-q6.4107",
+		.platform_name = "msm-pcm-routing",
+		.codec_name = "msm-stub-codec.1",
+		.codec_dai_name = "msm-stub-tx",
+		.no_pcm = 1,
+		.be_id = MSM_BACKEND_DAI_AUXPCM_TX,
+		.be_hw_params_fixup = mdm9625_auxpcm_be_params_fixup,
+		.ops = &mdm9625_auxpcm_be_ops,
+	},
 };
 
 static struct snd_soc_card snd_soc_card_mdm9625 = {
@@ -807,7 +944,7 @@
 	.num_links = ARRAY_SIZE(mdm9625_dai),
 };
 
-static int mdm9625_dtparse_mi2s(struct platform_device *pdev,
+static int mdm9625_dtparse(struct platform_device *pdev,
 				struct mdm9625_machine_data **pdata)
 {
 	int ret = 0, i = 0;
@@ -920,10 +1057,10 @@
 		ret = -ENOMEM;
 		goto err;
 	}
-	ret = mdm9625_dtparse_mi2s(pdev, &pdata);
+	ret = mdm9625_dtparse(pdev, &pdata);
 	if (ret) {
 		dev_err(&pdev->dev,
-			"%s: mi2s Pin data parse failed",
+			"%s: mi2s-aux Pin data parse failed\n",
 			__func__);
 		goto err;
 	}
@@ -960,6 +1097,14 @@
 				ret);
 		goto err;
 	}
+
+	lpaif_pri_muxsel_virt_addr = ioremap(LPAIF_PRI_MODE_MUXSEL, 4);
+	if (lpaif_pri_muxsel_virt_addr == NULL) {
+		pr_err("%s Pri muxsel virt addr is null\n", __func__);
+		ret = -EINVAL;
+		goto err;
+	}
+
 	return 0;
 err:
 	devm_kfree(&pdev->dev, pdata);
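
The MI2S and AUX PCM startup handlers above share the primary LPAIF interface and differ mainly in the value written to LPAIF_PRI_MODE_MUXSEL before GPIOs and clocks are set up. A minimal sketch of that selection, assuming the register layout implied by the defines above (pri_muxsel_set is an illustrative helper, not part of this patch):

	static void pri_muxsel_set(void __iomem *muxsel, bool pcm_mode)
	{
		/* bit I2S_PCM_SEL_OFFSET: I2S_SEL for MI2S, I2S_PCM_SEL for AUX PCM */
		u32 sel = (pcm_mode ? I2S_PCM_SEL : I2S_SEL) << I2S_PCM_SEL_OFFSET;

		if (muxsel)
			iowrite32(sel, muxsel);
		else
			pr_err("%s: primary muxsel not mapped\n", __func__);
	}
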
diff --git a/sound/soc/msm/msm-pcm-host-voice.c b/sound/soc/msm/msm-pcm-host-voice.c
index 7cb309e3..36826cc 100644
--- a/sound/soc/msm/msm-pcm-host-voice.c
+++ b/sound/soc/msm/msm-pcm-host-voice.c
@@ -28,7 +28,7 @@
 
 #include "qdsp6/q6voice.h"
 
-#define HPCM_MAX_Q_LEN 2
+#define HPCM_MAX_Q_LEN 10
 #define HPCM_MIN_VOC_PKT_SIZE 320
 #define HPCM_MAX_VOC_PKT_SIZE 640
 
diff --git a/sound/soc/msm/msm8974.c b/sound/soc/msm/msm8974.c
index 91e5e67..c5cfa11 100644
--- a/sound/soc/msm/msm8974.c
+++ b/sound/soc/msm/msm8974.c
@@ -27,7 +27,7 @@
 #include <asm/mach-types.h>
 #include <mach/socinfo.h>
 #include <sound/pcm_params.h>
-#include <qdsp6v2/msm-pcm-routing-v2.h>
+#include "qdsp6v2/msm-pcm-routing-v2.h"
 #include "../codecs/wcd9320.h"
 #include <linux/io.h>
 
diff --git a/sound/soc/msm/msm8x10.c b/sound/soc/msm/msm8x10.c
index 981a9a7..4dd85fc 100644
--- a/sound/soc/msm/msm8x10.c
+++ b/sound/soc/msm/msm8x10.c
@@ -114,8 +114,8 @@
 		.stream_name = "Primary MI2S Playback",
 		.cpu_dai_name = "msm-dai-q6-mi2s.0",
 		.platform_name = "msm-pcm-routing",
-		.codec_name     = "msm-stub-codec.1",
-		.codec_dai_name = "msm-stub-tx",
+		.codec_name     = "msm8x10-wcd-i2c-core.1-000d",
+		.codec_dai_name = "msm8x10_wcd_i2s_rx1",
 		.no_pcm = 1,
 		.be_id = MSM_BACKEND_DAI_MI2S_RX,
 		.init = &msm_audrx_init,
@@ -127,8 +127,8 @@
 		.stream_name = "Secondary MI2S Capture",
 		.cpu_dai_name = "msm-dai-q6-mi2s.1",
 		.platform_name = "msm-pcm-routing",
-		.codec_name     = "msm-stub-codec.1",
-		.codec_dai_name = "msm-stub-tx",
+		.codec_name     = "msm8x10-wcd-i2c-core.1-000d",
+		.codec_dai_name = "msm8x10_wcd_i2s_tx1",
 		.no_pcm = 1,
 		.be_id = MSM_BACKEND_DAI_SECONDARY_MI2S_TX,
 		.be_hw_params_fixup = msm_be_hw_params_fixup,
diff --git a/sound/soc/msm/qdsp6/q6asm.c b/sound/soc/msm/qdsp6/q6asm.c
index 35c215c..a55700c 100644
--- a/sound/soc/msm/qdsp6/q6asm.c
+++ b/sound/soc/msm/qdsp6/q6asm.c
@@ -2205,6 +2205,39 @@
 	return -EINVAL;
 }
 
+int q6asm_cfg_aac_sel_mix_coef(struct audio_client *ac, uint32_t mix_coeff)
+{
+	struct asm_aac_stereo_mix_coeff_selection_param aac_mix_coeff;
+	int rc = 0;
+	q6asm_add_hdr(ac, &aac_mix_coeff.hdr, sizeof(aac_mix_coeff), TRUE);
+	aac_mix_coeff.hdr.opcode =
+		ASM_STREAM_CMD_SET_ENCDEC_PARAM;
+	aac_mix_coeff.param_id =
+		ASM_PARAM_ID_AAC_STEREO_MIX_COEFF_SELECTION_FLAG;
+	aac_mix_coeff.param_size =
+		sizeof(struct asm_aac_stereo_mix_coeff_selection_param);
+	aac_mix_coeff.aac_stereo_mix_coeff_flag	= mix_coeff;
+	pr_debug("%s: mix_coeff = %u\n", __func__, mix_coeff);
+	rc = apr_send_pkt(ac->apr, (uint32_t *) &aac_mix_coeff);
+	if (rc < 0) {
+		pr_err("%s:Command opcode[0x%x]paramid[0x%x] failed\n",
+			__func__, ASM_STREAM_CMD_SET_ENCDEC_PARAM,
+			ASM_PARAM_ID_AAC_STEREO_MIX_COEFF_SELECTION_FLAG);
+		rc = -EINVAL;
+		goto fail_cmd;
+	}
+	rc = wait_event_timeout(ac->cmd_wait,
+			(atomic_read(&ac->cmd_state) == 0), 5*HZ);
+	if (!rc) {
+		pr_err("%s:timeout opcode[0x%x]\n", __func__,
+						aac_mix_coeff.hdr.opcode);
+		goto fail_cmd;
+	}
+	return 0;
+fail_cmd:
+	return -EINVAL;
+}
+
 int q6asm_set_encdec_chan_map(struct audio_client *ac,
 			uint32_t num_channels)
 {
diff --git a/sound/soc/msm/qdsp6v2/audio_ocmem.c b/sound/soc/msm/qdsp6v2/audio_ocmem.c
index 1969fe8..c14cb74 100644
--- a/sound/soc/msm/qdsp6v2/audio_ocmem.c
+++ b/sound/soc/msm/qdsp6v2/audio_ocmem.c
@@ -503,6 +503,7 @@
 		rc = -EINVAL;
 	}
 
+	kfree(voice_ocm_work);
 	return;
 }
 /**
@@ -614,6 +615,7 @@
 		rc = -EINVAL;
 	}
 
+	kfree(audio_ocm_work);
 	return;
 }
 
diff --git a/sound/soc/msm/qdsp6v2/msm-compr-q6-v2.c b/sound/soc/msm/qdsp6v2/msm-compr-q6-v2.c
index 33b72e8..d0b5500 100644
--- a/sound/soc/msm/qdsp6v2/msm-compr-q6-v2.c
+++ b/sound/soc/msm/qdsp6v2/msm-compr-q6-v2.c
@@ -50,7 +50,7 @@
 	unsigned volume;
 	atomic_t audio_ocmem_req;
 };
-static struct snd_msm compressed_audio = {NULL, 0x2000} ;
+static struct snd_msm compressed_audio = {NULL, 0x20002000};
 
 static struct audio_locks the_locks;
 
@@ -121,6 +121,7 @@
 	int i = 0;
 	int time_stamp_flag = 0;
 	int buffer_length = 0;
+	int stop_playback = 0;
 
 	pr_debug("%s opcode =%08x\n", __func__, opcode);
 	switch (opcode) {
@@ -141,6 +142,22 @@
 			break;
 		} else
 			atomic_set(&prtd->pending_buffer, 0);
+
+		/*
+		 * check for underrun
+		 */
+		snd_pcm_stream_lock_irq(substream);
+		if (snd_pcm_playback_empty(substream)) {
+			runtime->render_flag |= SNDRV_RENDER_STOPPED;
+			stop_playback = 1;
+		}
+		snd_pcm_stream_unlock_irq(substream);
+
+		if (stop_playback) {
+			pr_err("%s empty buffer, stop writes\n", __func__);
+			break;
+		}
+
 		buf = prtd->audio_client->port[IN].buf;
 		pr_debug("%s:writing %d bytes of buffer[%d] to dsp 2\n",
 				__func__, prtd->pcm_count, prtd->out_head);
@@ -519,6 +536,7 @@
 	}
 	prtd = &compr->prtd;
 	prtd->substream = substream;
+	runtime->render_flag = SNDRV_DMA_MODE;
 	prtd->audio_client = q6asm_audio_client_alloc(
 				(app_cb)compr_event_handler, compr);
 	if (!prtd->audio_client) {
@@ -568,8 +586,9 @@
 {
 	int rc = 0;
 	if (compressed_audio.prtd && compressed_audio.prtd->audio_client) {
-		rc = q6asm_set_volume(compressed_audio.prtd->audio_client,
-								 volume);
+		rc = q6asm_set_lrgain(compressed_audio.prtd->audio_client,
+						(volume >> 16) & 0xFFFF,
+						volume & 0xFFFF);
 		if (rc < 0) {
 			pr_err("%s: Send Volume command failed rc=%d\n",
 						__func__, rc);
@@ -674,6 +693,7 @@
 
 	pr_debug("%s\n", __func__);
 	prtd->mmap_flag = 1;
+	runtime->render_flag = SNDRV_NON_DMA_MODE;
 	if (runtime->dma_addr && runtime->dma_bytes) {
 		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
 		result = remap_pfn_range(vma, vma->vm_start,
@@ -800,6 +820,9 @@
 	}
 	runtime->hw.buffer_bytes_max =
 			runtime->hw.period_bytes_min * runtime->hw.periods_max;
+	pr_debug("allocate %d buffers each of size %d\n",
+		runtime->hw.periods_max,
+		runtime->hw.period_bytes_min);
 	ret = q6asm_audio_client_buf_alloc_contiguous(dir,
 			prtd->audio_client,
 			runtime->hw.period_bytes_min,
@@ -957,6 +980,69 @@
 	return snd_pcm_lib_ioctl(substream, cmd, arg);
 }
 
+static int msm_compr_restart(struct snd_pcm_substream *substream)
+{
+	struct snd_pcm_runtime *runtime = substream->runtime;
+	struct compr_audio *compr = runtime->private_data;
+	struct msm_audio *prtd = &compr->prtd;
+	struct audio_aio_write_param param;
+	struct audio_buffer *buf = NULL;
+	struct output_meta_data_st output_meta_data;
+	int time_stamp_flag = 0;
+	int buffer_length = 0;
+
+	pr_debug("%s, trigger restart\n", __func__);
+
+	if (runtime->render_flag & SNDRV_RENDER_STOPPED) {
+		buf = prtd->audio_client->port[IN].buf;
+		pr_debug("%s:writing %d bytes of buffer[%d] to dsp 2\n",
+				__func__, prtd->pcm_count, prtd->out_head);
+		pr_debug("%s:writing buffer[%d] from 0x%08x\n",
+				__func__, prtd->out_head,
+				((unsigned int)buf[0].phys
+				+ (prtd->out_head * prtd->pcm_count)));
+
+		if (runtime->tstamp_mode == SNDRV_PCM_TSTAMP_ENABLE)
+			time_stamp_flag = SET_TIMESTAMP;
+		else
+			time_stamp_flag = NO_TIMESTAMP;
+		memcpy(&output_meta_data, (char *)(buf->data +
+			prtd->out_head * prtd->pcm_count),
+			COMPRE_OUTPUT_METADATA_SIZE);
+
+		buffer_length = output_meta_data.frame_size;
+		pr_debug("meta_data_length: %d, frame_length: %d\n",
+			 output_meta_data.meta_data_length,
+			 output_meta_data.frame_size);
+		pr_debug("timestamp_msw: %d, timestamp_lsw: %d\n",
+			 output_meta_data.timestamp_msw,
+			 output_meta_data.timestamp_lsw);
+
+		param.paddr = (unsigned long)buf[0].phys
+				+ (prtd->out_head * prtd->pcm_count)
+				+ output_meta_data.meta_data_length;
+		param.len = buffer_length;
+		param.msw_ts = output_meta_data.timestamp_msw;
+		param.lsw_ts = output_meta_data.timestamp_lsw;
+		param.flags = time_stamp_flag;
+		param.uid =  (unsigned long)buf[0].phys
+				+ (prtd->out_head * prtd->pcm_count
+				+ output_meta_data.meta_data_length);
+		if (q6asm_async_write(prtd->audio_client,
+					&param) < 0)
+			pr_err("%s:q6asm_async_write failed\n",
+				__func__);
+		else
+			prtd->out_head =
+				(prtd->out_head + 1) & (runtime->periods - 1);
+
+		runtime->render_flag &= ~SNDRV_RENDER_STOPPED;
+		return 0;
+	}
+	return 0;
+}
+
+
 static struct snd_pcm_ops msm_compr_ops = {
 	.open	   = msm_compr_open,
 	.hw_params	= msm_compr_hw_params,
@@ -966,6 +1052,7 @@
 	.trigger	= msm_compr_trigger,
 	.pointer	= msm_compr_pointer,
 	.mmap		= msm_compr_mmap,
+	.restart	= msm_compr_restart,
 };
 
 static int msm_asoc_pcm_new(struct snd_soc_pcm_runtime *rtd)
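
Taken together, the changes to this file add a simple underrun handshake between the ASM write-done handler and the new restart op; in outline (summary only, no new code):

	/*
	 * write-done handler:  ring empty -> set SNDRV_RENDER_STOPPED and
	 *                      stop queuing buffers to the DSP
	 * msm_compr_restart(): re-read the metadata at out_head, issue one
	 *                      q6asm_async_write(), advance out_head and
	 *                      clear SNDRV_RENDER_STOPPED so writes resume
	 */
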
diff --git a/sound/soc/msm/qdsp6v2/msm-pcm-afe-v2.c b/sound/soc/msm/qdsp6v2/msm-pcm-afe-v2.c
index 74a3af9..91bb09b 100644
--- a/sound/soc/msm/qdsp6v2/msm-pcm-afe-v2.c
+++ b/sound/soc/msm/qdsp6v2/msm-pcm-afe-v2.c
@@ -333,8 +333,8 @@
 	prtd->audio_client = q6afe_audio_client_alloc(prtd);
 	if (!prtd->audio_client) {
 		pr_debug("%s: Could not allocate memory\n", __func__);
-		kfree(prtd);
 		mutex_unlock(&prtd->lock);
+		kfree(prtd);
 		return -ENOMEM;
 	}
 
diff --git a/sound/soc/msm/qdsp6v2/msm-pcm-lpa-v2.c b/sound/soc/msm/qdsp6v2/msm-pcm-lpa-v2.c
index 2fca464..3a4a674 100644
--- a/sound/soc/msm/qdsp6v2/msm-pcm-lpa-v2.c
+++ b/sound/soc/msm/qdsp6v2/msm-pcm-lpa-v2.c
@@ -366,7 +366,9 @@
 {
 	int rc = 0;
 	if (lpa_audio.prtd && lpa_audio.prtd->audio_client) {
-		rc = q6asm_set_volume(lpa_audio.prtd->audio_client, volume);
+		rc = q6asm_set_lrgain(lpa_audio.prtd->audio_client,
+						(volume >> 16) & 0xFFFF,
+						volume & 0xFFFF);
 		if (rc < 0) {
 			pr_err("%s: Send Volume command failed rc=%d\n",
 					__func__, rc);
@@ -534,8 +536,8 @@
 		memset(&tstamp, 0x0, sizeof(struct snd_compr_tstamp));
 		rc = q6asm_get_session_time(prtd->audio_client, &timestamp);
 		if (rc < 0) {
-			pr_err("%s: Get Session Time return value =%lld\n",
-				__func__, timestamp);
+			pr_err("%s: Fail to get session time stamp, rc:%d\n",
+							__func__, rc);
 			return -EAGAIN;
 		}
 		temp = (timestamp * 2 * runtime->channels);
diff --git a/sound/soc/msm/qdsp6v2/msm-pcm-routing-v2.c b/sound/soc/msm/qdsp6v2/msm-pcm-routing-v2.c
index c48132e..02c3457 100644
--- a/sound/soc/msm/qdsp6v2/msm-pcm-routing-v2.c
+++ b/sound/soc/msm/qdsp6v2/msm-pcm-routing-v2.c
@@ -58,14 +58,14 @@
 
 #define INT_RX_VOL_MAX_STEPS 0x2000
 #define INT_RX_VOL_GAIN 0x2000
-
+#define INT_RX_LR_VOL_MAX_STEPS 0x20002000
 static int msm_route_fm_vol_control;
 static const DECLARE_TLV_DB_LINEAR(fm_rx_vol_gain, 0,
 			INT_RX_VOL_MAX_STEPS);
 
 static int msm_route_lpa_vol_control;
 static const DECLARE_TLV_DB_LINEAR(lpa_rx_vol_gain, 0,
-			INT_RX_VOL_MAX_STEPS);
+			INT_RX_LR_VOL_MAX_STEPS);
 
 static int msm_route_multimedia2_vol_control;
 static const DECLARE_TLV_DB_LINEAR(multimedia2_rx_vol_gain, 0,
@@ -73,7 +73,7 @@
 
 static int msm_route_compressed_vol_control;
 static const DECLARE_TLV_DB_LINEAR(compressed_rx_vol_gain, 0,
-			INT_RX_VOL_MAX_STEPS);
+			INT_RX_LR_VOL_MAX_STEPS);
 
 static int msm_route_multimedia5_vol_control;
 static const DECLARE_TLV_DB_LINEAR(multimedia5_rx_vol_gain, 0,
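
The q6asm_set_lrgain() calls above and the widened INT_RX_LR_VOL_MAX_STEPS both assume that the 32-bit volume control value now packs two 16-bit gains, left channel in the upper half and right channel in the lower half, with 0x2000 as the per-channel maximum step. A minimal sketch of that packing (pack_lr_gain is an illustrative name, not part of this patch):

	static inline u32 pack_lr_gain(u16 left, u16 right)
	{
		return ((u32)left << 16) | right;
	}

	/* split back out exactly as the drivers above do:
	 *   q6asm_set_lrgain(ac, (volume >> 16) & 0xFFFF, volume & 0xFFFF);
	 */
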
diff --git a/sound/soc/msm/qdsp6v2/q6adm.c b/sound/soc/msm/qdsp6v2/q6adm.c
index 913dded..1f2f307 100644
--- a/sound/soc/msm/qdsp6v2/q6adm.c
+++ b/sound/soc/msm/qdsp6v2/q6adm.c
@@ -223,6 +223,12 @@
 	adm_params->hdr.dest_svc = APR_SVC_ADM;
 	adm_params->hdr.dest_domain = APR_DOMAIN_ADSP;
 	index = afe_get_port_index(port_id);
+	if (index < 0 || index >= AFE_MAX_PORTS) {
+		pr_err("%s: invalid port idx %d portid %#x\n",
+				__func__, index, port_id);
+		ret = -EINVAL;
+		goto fail_cmd;
+	}
 	adm_params->hdr.dest_port = atomic_read(&this_adm.copp_id[index]);
 	adm_params->hdr.token = port_id;
 	adm_params->hdr.opcode = ADM_CMD_SET_PP_PARAMS_V5;
diff --git a/sound/soc/msm/qdsp6v2/q6asm.c b/sound/soc/msm/qdsp6v2/q6asm.c
index 87990a9..5be62cc 100644
--- a/sound/soc/msm/qdsp6v2/q6asm.c
+++ b/sound/soc/msm/qdsp6v2/q6asm.c
@@ -2091,6 +2091,13 @@
 	return -EINVAL;
 }
 
+/* AAC stereo mix coefficient selection is not yet implemented for B family */
+int q6asm_cfg_aac_sel_mix_coef(struct audio_client *ac, uint32_t mix_coeff)
+{
+	/* To Be Done */
+	return 0;
+}
+
 int q6asm_enc_cfg_blk_qcelp(struct audio_client *ac, uint32_t frames_per_buf,
 		uint16_t min_rate, uint16_t max_rate,
 		uint16_t reduced_rate_level, uint16_t rate_modulation_cmd)
diff --git a/sound/soc/msm/qdsp6v2/q6voice.c b/sound/soc/msm/qdsp6v2/q6voice.c
index 6a65880..12e83b0 100644
--- a/sound/soc/msm/qdsp6v2/q6voice.c
+++ b/sound/soc/msm/qdsp6v2/q6voice.c
@@ -2765,13 +2765,14 @@
 	uint64_t *enc_buf;
 	void *apr_cvs;
 	u16 cvs_handle;
-	dec_buf = (uint64_t *)v->shmem_info.sh_buf.buf[0].phys;
-	enc_buf = (uint64_t *)v->shmem_info.sh_buf.buf[1].phys;
 
 	if (v == NULL) {
 		pr_err("%s: v is NULL\n", __func__);
 		return -EINVAL;
 	}
+	dec_buf = (uint64_t *)v->shmem_info.sh_buf.buf[0].phys;
+	enc_buf = (uint64_t *)v->shmem_info.sh_buf.buf[1].phys;
+
 	apr_cvs = common.apr_q6_cvs;
 
 	if (!apr_cvs) {
@@ -4618,6 +4619,10 @@
 	struct voice_data *v = voice_get_session(
 				common.voice[VOC_PATH_FULL].session_id);
 
+	if (v == NULL) {
+		pr_err("%s: v is NULL\n", __func__);
+		return -EINVAL;
+	}
 	v->shmem_info.sh_buf.client = msm_ion_client_create(UINT_MAX,
 							    "voip_client");
 	if (IS_ERR_OR_NULL((void *)v->shmem_info.sh_buf.client)) {
@@ -4686,6 +4691,10 @@
 	struct voice_data *v = voice_get_session(
 				common.voice[VOC_PATH_FULL].session_id);
 
+	if (v == NULL) {
+		pr_err("%s: v is NULL\n", __func__);
+		return -EINVAL;
+	}
 	v->shmem_info.memtbl.client = msm_ion_client_create(UINT_MAX,
 							      "voip_client");
 	if (IS_ERR_OR_NULL((void *)v->shmem_info.memtbl.client)) {