Merge "usb: u_ether: Add workqueue as bottom half handler for rx data path" into msm-3.4
diff --git a/Documentation/devicetree/bindings/arm/msm/dcvs-core-info.txt b/Documentation/devicetree/bindings/arm/msm/dcvs-core-info.txt
new file mode 100644
index 0000000..a39356c
--- /dev/null
+++ b/Documentation/devicetree/bindings/arm/msm/dcvs-core-info.txt
@@ -0,0 +1,78 @@
+DCVS Core Info
+
+This data describes specific DCVS tuning data for a specific core (CPU, GPU,
+etc).
+
+Required properties:
+
+- qcom,core-max-time-us:	Maximum time limit in microseconds for switching the clock rate.
+				Switching time is limited to this value if it takes longer than this limit. Typical value is 100000.
+- qcom,algo-slack-time-us:	Time in microseconds after which the QoS guarantee will kick in
+				and the clock rate will be increased as necessary. Typical value is about 30000.
+- qcom,algo-disable-pc-threshold:	If core frequency (kHz) is higher than this value, power collapse is disallowed. Set to 0 for GPU.
+- qcom,algo-ss-window-size:	Steady state window size in microseconds.
+- qcom,algo-ss-util-pct:	When determining the steady state level, this percentage value is used to provide headroom
+				from the utilized cpu to the selected level.
+- qcom,algo-em-max-util-pct:	When determining the level with the lowest energy, any level whose busy
+				percentage, for the measured work in the last window, exceeds this value is disqualified for performance reasons.
+- qcom,algo-ss-iobusy-conv:	Used to convert correlation time into assumed IO Busy time, which is removed
+				from measured elapsed time when computing cpu utilization.
+
+
+A number of frequency levels are represented as sub-nodes:
+
+Required properties:
+- reg:			The index of the frequency entry
+- qcom,freq:		The frequency of the DCVS entry (in kHz)
+- qcom,idle-energy:	The idle energy cost of the entry (in microwatts)
+- qcom,active-energy:	The active energy cost of the entry (in microwatts)
+
+Sample:
+
+qcom,kgsl-3d0@fdb00000 {
+	...
+	qcom,dcvs-core-info {
+		#address-cells = <1>;
+		#size-cells = <0>;
+
+		compatible = "qcom,dcvs-core-info";
+
+		qcom,core-max-time-us = <100000>;
+		qcom,algo-slack-time-us = <39000>;
+		qcom,algo-disable-pc-threshold = <86000>;
+		qcom,algo-ss-window-size = <1000000>;
+		qcom,algo-ss-util-pct = <95>;
+		qcom,algo-em-max-util-pct = <97>;
+		qcom,algo-ss-iobusy-conv = <100>;
+
+		qcom,dcvs-freq@0 {
+			reg = <0>;
+			qcom,freq = <0>;
+			qcom,idle-energy = <0>;
+			qcom,active-energy = <333932>;
+		};
+
+		qcom,dcvs-freq@1 {
+			reg = <1>;
+			qcom,freq = <0>;
+			qcom,idle-energy = <0>;
+			qcom,active-energy = <497532>;
+		};
+
+		qcom,dcvs-freq@2 {
+			reg = <2>;
+			qcom,freq = <0>;
+			qcom,idle-energy = <0>;
+			qcom,active-energy = <707610>;
+		};
+
+		qcom,dcvs-freq@3 {
+			reg = <3>;
+			qcom,freq = <0>;
+			qcom,idle-energy = <0>;
+			qcom,active-energy = <844545>;
+		};
+	};
+	...
+};
+
diff --git a/Documentation/devicetree/bindings/arm/msm/msm_ion.txt b/Documentation/devicetree/bindings/arm/msm/msm_ion.txt
new file mode 100644
index 0000000..5c6b804
--- /dev/null
+++ b/Documentation/devicetree/bindings/arm/msm/msm_ion.txt
@@ -0,0 +1,50 @@
+ION Memory Manager (ION)
+
+ION is a memory manager that allows for sharing of buffers between different
+processes and between user space and kernel space. ION manages different
+memory spaces by separating the memory spaces into "heaps". Depending on the
+type of heap ION must reserve memory using the msm specific memory reservation
+bindings (see Documentation/devicetree/bindings/arm/msm/memory-reserve.txt).
+
+Required properties
+
+- compatible: "qcom,msm-ion"
+- reg: The ID of the ION heap.
+
+Optional properties
+
+- compatible: "qcom,msm-ion-reserve" This is required if memory is to be reserved
+  as specified by qcom,memory-reservation-size below.
+- qcom,heap-align: Alignment of start of the memory in the heap.
+- qcom,heap-adjacent: ID of heap this heap needs to be adjacent to.
+- qcom,memory-reservation-size: size of reserved memory for the ION heap.
+- qcom,memory-reservation-type: type of memory to be reserved
+(see memory-reserve.txt for information about memory reservations)
+
+Example:
+	qcom,ion {
+                 compatible = "qcom,msm-ion";
+                 #address-cells = <1>;
+                 #size-cells = <0>;
+
+                 qcom,ion-heap@30 { /* SYSTEM HEAP */
+                         reg = <30>;
+                 };
+
+                 qcom,ion-heap@8 { /* CP_MM HEAP */
+                         compatible = "qcom,msm-ion-reserve";
+                         reg = <8>;
+                         qcom,heap-align = <0x1000>;
+                         qcom,memory-reservation-type = "EBI1"; /* reserve EBI memory */
+                         qcom,memory-reservation-size = <0x7800000>;
+                 };
+
+                 qcom,ion-heap@29 { /* FIRMWARE HEAP */
+                         compatible = "qcom,msm-ion-reserve";
+                         reg = <29>;
+                         qcom,heap-align = <0x20000>;
+                         qcom,heap-adjacent = <8>;
+                         qcom,memory-reservation-type = "EBI1"; /* reserve EBI memory */
+                         qcom,memory-reservation-size = <0xA00000>;
+                 };
+	};
diff --git a/Documentation/devicetree/bindings/fb/mdss-dsi-panel.txt b/Documentation/devicetree/bindings/fb/mdss-dsi-panel.txt
new file mode 100644
index 0000000..6db1150
--- /dev/null
+++ b/Documentation/devicetree/bindings/fb/mdss-dsi-panel.txt
@@ -0,0 +1,140 @@
+Qualcomm mdss-dsi-panel
+
+mdss-dsi-panel is a DSI panel driver which supports panels that
+are compatible with the MIPI Display Serial Interface specification.
+
+Required properties:
+- compatible:				Must be "qcom,mdss-dsi-panel"
+- status:        			A string that has to be set to "okay/ok" to enable
+					the panel driver. By default this property will be
+					set to "disable". Will be set to "ok/okay" status
+					for specific platforms.
+- qcom,mdss-pan-res:			An array of two values that specifies the panel
+					horizontal and vertical resolution.
+- qcom,mdss-pan-bpp:			Specifies the panel bits per pixel. Default value is 24 (rgb888).
+					18 = for rgb666
+					16 = for rgb565
+- qcom,mdss-panel-on-cmds:		An array of variable length that lists the init commands
+					of the panel. Each command will have the format specified
+					as below:
+					--> data type of the command
+					--> specifies whether this command packet is last.
+					--> virtual channel
+					--> Needs acknowledge from the panel or not.
+					--> wait time after the command is transmitted.
+					--> size of payload
+					--> payload.
+- qcom,mdss-panel-off-cmds:		An array of variable length that lists the panel off
+					commands. Each command will have the format specified
+					as below:
+					--> data type of the command
+					--> specifies whether this command packet is last.
+					--> virtual channel
+					--> Needs acknowledge from the panel or not.
+					--> wait time after the command is transmitted.
+					--> size of payload
+					--> payload.
+
+Required structure:
+- A qcom,mdss-dsi-panel node must be a child of an mdss-dsi controller node that links to
+    one of the two DSI controllers.
+
+
+Optional properties:
+- label:		        	A string used as a descriptive name of the panel
+- qcom,mdss-pan-porch-values:		An array of size 6 that specifies the panel blanking values.
+- qcom,mdss-pan-underflow-clr:		Specifies the controller settings for the panel underflow clear
+					settings. Default value is 0xff.
+- qcom,mdss-pan-bl-levels:		Specifies the backlight levels supported by the panel.
+					Default range is 1 to 255.
+
+- qcom,mdss-pan-dsi-mode:		Specifies the panel operating mode.
+					0 = enable video mode(default mode).
+					1 = enable command mode.
+- qcom,mdss-pan-dsi-h-pulse-mode:	Specifies the pulse mode option for the panel.
+					0 = Don't send hsa/he following vs/ve packet(default)
+					1 = Send hsa/he following vs/ve packet
+- qcom,mdss-pan-dsi-h-power-stop:	An array of size 3 that specifies the power mode
+					during horizontal porch and sync periods of the panel.
+					0 = high speed mode(default mode).
+					1 = Low power mode for horizontal porches and sync pulse.
+- qcom,mdss-pan-dsi-bllp-power-stop:	An array of size 2 that specifies the power mode
+					during blanking period and after EOF(end of frame).
+					0 = high speed mode(default mode).
+					1 = Low power mode during blanking and EOF.
+- qcom,mdss-pan-dsi-traffic-mode:	Specifies the panel traffic mode.
+					0 = non burst with sync pulses (default mode).
+					1 = non burst with sync start event.
+					2 = burst mode.
+- qcom,mdss-pan-dsi-dst-format:		Specifies the destination format.
+					0 = DSI_VIDEO_DST_FORMAT_RGB565.
+					1 = DSI_VIDEO_DST_FORMAT_RGB666.
+					2 = DSI_VIDEO_DST_FORMAT_RGB666_LOOSE.
+					3 = DSI_VIDEO_DST_FORMAT_RGB888 (Default format)
+					6 = DSI_CMD_DST_FORMAT_RGB565
+					7 = DSI_CMD_DST_FORMAT_RGB666
+					8 = DSI_CMD_DST_FORMAT_RGB888
+- qcom,mdss-pan-dsi-vc:			Specifies the virtual channel identifier.
+					0 = default value.
+- qcom,mdss-pan-dsi-rgb-swap:		Specifies the R, G and B channel ordering.
+					0 = DSI_RGB_SWAP_RGB (default value)
+					1 = DSI_RGB_SWAP_RBG
+					2 = DSI_RGB_SWAP_BGR
+					3 = DSI_RGB_SWAP_BRG
+					4 = DSI_RGB_SWAP_GRB
+					5 = DSI_RGB_SWAP_GBR
+- qcom,mdss-pan-dsi-data-lanes:		An array that specifies the data lanes enabled.
+					<1 1 0 0> = data lanes 1 and 2 are enabled.(default).
+- qcom,mdss-pan-dsi-t-clk:		An array that specifies the byte clock cycles
+					before and after each mode switch.
+- qcom,mdss-pan-dsi-stream:		Specifies the packet stream to be used.
+					0 = stream 0 (default)
+					1 = stream 1
+- qcom,mdss-pan-dsi-mdp-tr:		Specifies the trigger mechanism to be used for MDP path.
+					0 = no trigger
+					2 = Tear check signal line used for trigger
+					4 = Triggered by software (default mode)
+					6 = Software trigger and TE
+- qcom,mdss-pan-dsi-dma-tr:		Specifies the trigger mechanism to be used for DMA path.
+					0 = no trigger
+					2 = Tear check signal line used for trigger
+					4 = Triggered by software (default mode)
+					5 = Software trigger and start/end of frame trigger.
+					6 = Software trigger and TE
+- qcom,mdss-pan-dsi-frame-rate:		Specifies the frame rate for the panel.
+					60 = 60 frames per second (default)
+
+Note: if a given optional qcom,* binding is not present, the driver will use
+the default values specified above.
+
+Example:
+	qcom,mdss_dsi@fd922800 {
+
+		qcom,mdss_dsi_sim_video {
+			compatible = "qcom,mdss-dsi-panel";
+			label = "simulator video mode dsi panel";
+			status = "disable";
+			qcom,mdss-pan-res = <640 480>;
+			qcom,mdss-pan-bpp = <24>;
+			qcom,mdss-pan-porch-values = <6 2 6 6 2 6>;
+			qcom,mdss-pan-underflow-clr = <0xff>;
+			qcom,mdss-pan-bl-levels = <1 15>;
+			qcom,mdss-pan-dsi-mode = <0>;
+			qcom,mdss-pan-dsi-h-pulse-mode = <1>;
+			qcom,mdss-pan-dsi-h-power-stop = <1 1 1>;
+			qcom,mdss-pan-dsi-bllp-power-stop = <1 1>;
+			qcom,mdss-pan-dsi-traffic-mode = <0>;
+			qcom,mdss-pan-dsi-dst-format = <3>;
+			qcom,mdss-pan-dsi-vc = <0>;
+			qcom,mdss-pan-dsi-rgb-swap = <0>;
+			qcom,mdss-pan-dsi-data-lanes = <1 1 0 0>;
+			qcom,mdss-pan-dsi-t-clk = <0x24 0x03>;
+			qcom,mdss-pan-dsi-stream = <0>;
+			qcom,mdss-pan-dsi-mdp-tr = <0x04>;
+			qcom,mdss-pan-dsi-dma-tr = <0x04>;
+			qcom,mdss-pan-dsi-frame-rate = <60>;
+			qcom,mdss-panel-on-cmds = [32 01 00 00 00 02 00 00];
+			qcom,mdss-panel-off-cmds = [22 01 00 00 00 00 00];
+		};
+
+	};
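+
+For reference, the eight bytes in the panel on-cmds example above can be read
+against the command format documented earlier (this breakdown is an
+illustration only; the panel driver performs the authoritative parsing):
+	0x32		data type (MIPI DSI "turn on peripheral" command)
+	0x01		last command in this packet
+	0x00		virtual channel
+	0x00		no acknowledge requested from the panel
+	0x00		no wait time after the command is transmitted
+	0x02		payload size
+	0x00 0x00	payload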
diff --git a/Documentation/devicetree/bindings/gpu/adreno-pwrlevels.txt b/Documentation/devicetree/bindings/gpu/adreno-pwrlevels.txt
new file mode 100644
index 0000000..d50a21c
--- /dev/null
+++ b/Documentation/devicetree/bindings/gpu/adreno-pwrlevels.txt
@@ -0,0 +1,40 @@
+Adreno Power Levels
+
+The Adreno GPU definition should include a variable number of power levels
+defining the GPU and bus frequencies for the levels that the GPU can operate at.
+
+Required properties:
+
+- compatible:	The compatible name for the object (qcom,gpu-pwrlevels)
+
+Each powerlevel definition is as follows:
+
+- reg:              Index of the power level (lower is considered higher
+		    performance)
+- qcom,gpu-freq:    The GPU frequency for the power level (in Hz)
+- qcom,bus-freq:    An index representing the bus scaling usecase appropriate
+		    for the power level
+- qcom,io-fraction: A number indicating the fraction of the CPU I/O busy that
+		    this operating point should represent.
+
+Sample usage:
+
+qcom,kgsl-3d0@fdb00000 {
+	...
+	qcom,gpu-pwrlevels {
+		#address-cells = <1>;
+		#size-cells = <0>;
+
+		compatible = "qcom,gpu-pwrlevels";
+
+		qcom,gpu-pwrlevel@0 {
+			reg = <0>;
+			qcom,gpu-freq = <500000000>;
+			qcom,bus-freq = <3>;
+			qcom,io-fraction = <0>;
+		};
+	};
+
+	...
+};
+
diff --git a/Documentation/devicetree/bindings/gpu/adreno.txt b/Documentation/devicetree/bindings/gpu/adreno.txt
new file mode 100644
index 0000000..16925fb
--- /dev/null
+++ b/Documentation/devicetree/bindings/gpu/adreno.txt
@@ -0,0 +1,159 @@
+Qualcomm GPU
+
+Qualcomm Adreno GPU
+
+Required properties:
+- label:		A string used as a descriptive name for the device.
+- compatible:		Must be "qcom,kgsl-3d0" and "qcom,kgsl-3d"
+- reg:			Specifies the base address and address size for this device.
+- interrupts:		Interrupt mapping for GPU IRQ.
+- interrupt-names:	String property to describe the name of the interrupt.
+- qcom,id:		An integer used as an identification number for the device.
+
+- qcom,clk-map:		A bit map value for clocks controlled by kgsl.
+				KGSL_CLK_SRC    0x00000001
+				KGSL_CLK_CORE   0x00000002
+				KGSL_CLK_IFACE  0x00000004
+				KGSL_CLK_MEM    0x00000008
+				KGSL_CLK_MEM_IFACE 0x00000010
+				KGSL_CLK_AXI    0x00000020
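+				For example, a qcom,clk-map value of <0x00000016>
+				selects KGSL_CLK_CORE | KGSL_CLK_IFACE |
+				KGSL_CLK_MEM_IFACE (0x2 | 0x4 | 0x10), matching the
+				comment in the example below.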
+
+Bus Scaling Data:
+- qcom,grp3d-vectors:	A series of 4 cell properties, format of which is:
+						<src dst ab ib>, <src dst ab ib>, // For Bus Scaling Usecase 1
+						<src dst ab ib>, <src dst ab ib>, // For Bus Scaling Usecase 2
+						<..  ..  .. ..>, <..  ..  .. ..>; // For Bus Scaling Usecase n
+						This property is a series of all vectors for all Bus Scaling Usecases.
+						Each set of vectors for each usecase describes bandwidth votes for a combination
+						of src/dst ports.  The driver will set the desired use case based on the selected
+						power level and the desired bandwidth vote will be registered for the port pairs.
+					Current values of src are:
+						0 = MSM_BUS_MASTER_GRAPHICS_3D
+						1 = MSM_BUS_MASTER_GRAPHICS_3D_PORT1
+						2 = MSM_BUS_MASTER_V_OCMEM_GFX3D
+					Current values of dst are:
+						0 = MSM_BUS_SLAVE_EBI_CH0
+						1 = MSM_BUS_SLAVE_OCMEM
+					ab: Represents aggregated bandwidth. This value is 0 for Graphics.
+					ib: Represents instantaneous bandwidth. This value has a range of 0 to 8000 MB/s.
+- qcom,grp3d-num-vectors-per-usecase:	This represents the number of vectors in each Bus Scaling Usecase.
+- qcom,grp3d-num-bus-scale-usecases:	This is the number of Bus Scaling usecases defined in the vectors property.
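+
+For example, with qcom,grp3d-num-vectors-per-usecase = <2> and
+qcom,grp3d-num-bus-scale-usecases = <4>, the qcom,grp3d-vectors property lists
+eight <src dst ab ib> tuples: the first two form bus scaling usecase 0, the
+next two usecase 1, and so on (as in the example at the end of this document).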
+
+GDSC Oxili Regulators:
+- vddcx-supply:			Phandle for vddcx regulator device node.
+- vdd-supply:			Phandle for vdd regulator device node.
+
+IOMMU Data:
+- iommu:			Phandle for the KGSL IOMMU device node
+
+GPU Power levels:
+- qcom,gpu-pwrlevels:		Container for the GPU Power Levels (see
+				adreno-pwrlevels.txt)
+
+DCVS Core info
+- qcom,dcvs-core-info		Container for the DCVS core info (see
+				dcvs-core-info.txt)
+
+Optional Properties:
+- qcom,initial-pwrlevel:   This value indicates which qcom,gpu-pwrlevel should be used at start time
+			   and when coming back out of resume.
+- qcom,idle-timeout:	   This property represents the time in microseconds for idle timeout.
+- qcom,nap-allowed:	   Boolean. <0> or <1> to disable/enable nap.
+- qcom,chipid:		   If it exists this property is used to replace
+			   the chip identification read from the GPU hardware.
+			   This is used to override faulty hardware readings.
+
+Example of A330 GPU in MSM8974:
+
+/ {
+	qcom,kgsl-3d0@fdb00000 {
+		label = "kgsl-3d0";
+		compatible = "qcom,kgsl-3d0", "qcom,kgsl-3d";
+		reg = <0xfdb00000 0x20000>;
+		reg-names = "kgsl_3d0_reg_memory";
+		interrupts = <0 33 0>;
+		interrupt-names = "kgsl_3d0_irq";
+		qcom,id = <0>;
+
+		qcom,chipid = <0x03030000>;
+
+		/* Power Settings */
+
+		qcom,initial-pwrlevel = <1>;
+		qcom,idle-timeout = <83>; //<HZ/12>
+		qcom,nap-allowed = <1>;
+		qcom,clk-map = <0x00000016>; //KGSL_CLK_CORE | KGSL_CLK_IFACE | KGSL_CLK_MEM_IFACE
+
+		/* Bus Scale Settings */
+		qcom,grp3d-vectors = <0 0 0 0>, <2 1 0 0>,
+				<0 0 0 2000>, <2 1 0 3000>,
+				<0 0 0 4000>, <2 1 0 5000>,
+				<0 0 0 6400>, <2 1 0 7600>;
+		qcom,grp3d-num-vectors-per-usecase = <2>;
+		qcom,grp3d-num-bus-scale-usecases = <4>;
+
+		/* GDSC oxili regulators */
+		vddcx-supply = <&gdsc_oxili_cx>;
+		vdd-supply = <&gdsc_oxili_gx>;
+
+		/* IOMMU Data */
+		iommu = <&kgsl>;
+
+		qcom,gpu-pwrlevels {
+			#address-cells = <1>;
+			#size-cells = <0>;
+
+			compatible = "qcom,gpu-pwrlevels";
+
+			qcom,gpu-pwrlevel@0 {
+				reg = <0>;
+				qcom,gpu-freq = <500000000>;
+				qcom,bus-freq = <3>;
+				qcom,io-fraction = <0>;
+			};
+		};
+
+		qcom,dcvs-core-info {
+			#address-cells = <1>;
+			#size-cells = <0>;
+
+			compatible = "qcom,dcvs-core-info";
+
+			qcom,core-max-time-us = <100000>;
+			qcom,algo-slack-time-us = <39000>;
+			qcom,algo-disable-pc-threshold = <86000>;
+			qcom,algo-ss-window-size = <1000000>;
+			qcom,algo-ss-util-pct = <95>;
+			qcom,algo-em-max-util-pct = <97>;
+			qcom,algo-ss-iobusy-conv = <100>;
+
+			qcom,dcvs-freq@0 {
+				reg = <0>;
+				qcom,freq = <0>;
+				qcom,idle-energy = <0>;
+				qcom,active-energy = <333932>;
+			};
+
+			qcom,dcvs-freq@1 {
+				reg = <1>;
+				qcom,freq = <0>;
+				qcom,idle-energy = <0>;
+				qcom,active-energy = <497532>;
+			};
+
+			qcom,dcvs-freq@2 {
+				reg = <2>;
+				qcom,freq = <0>;
+				qcom,idle-energy = <0>;
+				qcom,active-energy = <707610>;
+			};
+
+			qcom,dcvs-freq@3 {
+				reg = <3>;
+				qcom,freq = <0>;
+				qcom,idle-energy = <0>;
+				qcom,active-energy = <844545>;
+			};
+		};
+	};
+};
diff --git a/Documentation/devicetree/bindings/media/video/msm-cam-server.txt b/Documentation/devicetree/bindings/media/video/msm-cam-server.txt
new file mode 100644
index 0000000..2b6f513
--- /dev/null
+++ b/Documentation/devicetree/bindings/media/video/msm-cam-server.txt
@@ -0,0 +1,11 @@
+* Qualcomm MSM Camera Server
+
+Required properties:
+- compatible :
+    - "qcom,cam_server"
+
+Example:
+
+   qcom,cam_server {
+       compatible = "qcom,cam_server";
+   };
diff --git a/Documentation/devicetree/bindings/media/video/msm-cci.txt b/Documentation/devicetree/bindings/media/video/msm-cci.txt
new file mode 100644
index 0000000..75916e5
--- /dev/null
+++ b/Documentation/devicetree/bindings/media/video/msm-cci.txt
@@ -0,0 +1,23 @@
+* Qualcomm MSM CCI
+
+Required properties:
+- cell-index: cci hardware core index
+- compatible :
+    - "qcom,cci"
+- reg : offset and length of the register set for the device
+    for the cci operating in compatible mode.
+- reg-names : should specify relevant names to each reg property defined.
+- interrupts : should contain the cci interrupt.
+- interrupt-names : should specify relevant names to each interrupts
+  property defined.
+
+Example:
+
+   qcom,cci@0xfda0c000 {
+       cell-index = <0>;
+       compatible = "qcom,cci";
+       reg = <0xfda0c000 0x300>;
+       reg-names = "cci";
+       interrupts = <0 50 0>;
+       interrupt-names = "cci";
+   };
diff --git a/Documentation/devicetree/bindings/media/video/msm-cpp.txt b/Documentation/devicetree/bindings/media/video/msm-cpp.txt
new file mode 100644
index 0000000..5cf0154
--- /dev/null
+++ b/Documentation/devicetree/bindings/media/video/msm-cpp.txt
@@ -0,0 +1,25 @@
+* Qualcomm MSM CPP
+
+Required properties:
+- cell-index: cpp hardware core index
+- compatible :
+    - "qcom,cpp"
+- reg : offset and length of the register set for the device
+    for the cpp operating in compatible mode.
+- reg-names : should specify relevant names to each reg property defined.
+- interrupts : should contain the cpp interrupt.
+- interrupt-names : should specify relevant names to each interrupts
+  property defined.
+- vdd-supply: phandle to GDSC regulator controlling VFE & CPP core.
+
+Example:
+
+   qcom,cpp@0xfda04000 {
+       cell-index = <0>;
+       compatible = "qcom,cpp";
+       reg = <0xfda04000 0x100>;
+       reg-names = "cpp";
+       interrupts = <0 49 0>;
+       interrupt-names = "cpp";
+       vdd-supply = <&gdsc_vfe>;
+   };
diff --git a/Documentation/devicetree/bindings/media/video/msm-csi-phy.txt b/Documentation/devicetree/bindings/media/video/msm-csi-phy.txt
new file mode 100644
index 0000000..90bdbda
--- /dev/null
+++ b/Documentation/devicetree/bindings/media/video/msm-csi-phy.txt
@@ -0,0 +1,23 @@
+* Qualcomm MSM CSI Phy
+
+Required properties:
+- cell-index: csi phy hardware core index
+- compatible :
+    - "qcom,csiphy"
+- reg : offset and length of the register set for the device
+    for the csiphy operating in compatible mode.
+- reg-names : should specify relevant names to each reg property defined.
+- interrupts : should contain the csiphy interrupt.
+- interrupt-names : should specify relevant names to each interrupts
+  property defined.
+
+Example:
+
+   qcom,csiphy@fda0ac00 {
+       cell-index = <0>;
+       compatible = "qcom,csiphy";
+       reg = <0xfda0ac00 0x200>;
+       reg-names = "csiphy";
+       interrupts = <0 78 0>;
+       interrupt-names = "csiphy";
+   };
diff --git a/Documentation/devicetree/bindings/media/video/msm-csid.txt b/Documentation/devicetree/bindings/media/video/msm-csid.txt
new file mode 100644
index 0000000..76a2825
--- /dev/null
+++ b/Documentation/devicetree/bindings/media/video/msm-csid.txt
@@ -0,0 +1,23 @@
+* Qualcomm MSM CSID
+
+Required properties:
+- cell-index: csid hardware core index
+- compatible :
+    - "qcom,csid"
+- reg : offset and length of the register set for the device
+    for the csid operating in compatible mode.
+- reg-names : should specify relevant names to each reg property defined.
+- interrupts : should contain the csid interrupt.
+- interrupt-names : should specify relevant names to each interrupts
+  property defined.
+
+Example:
+
+   qcom,csid@fda08000 {
+       cell-index = <0>;
+       compatible = "qcom,csid";
+       reg = <0xfda08000 0x200>;
+       reg-names = "csid";
+       interrupts = <0 51 0>;
+       interrupt-names = "csid";
+   };
diff --git a/Documentation/devicetree/bindings/media/video/msm-irqrouter.txt b/Documentation/devicetree/bindings/media/video/msm-irqrouter.txt
new file mode 100644
index 0000000..63fb7d9
--- /dev/null
+++ b/Documentation/devicetree/bindings/media/video/msm-irqrouter.txt
@@ -0,0 +1,18 @@
+* Qualcomm MSM IRQ Router
+
+Required properties:
+- cell-index: irq router hardware core index
+- compatible :
+    - "qcom,irqrouter"
+- reg : offset and length of the register set for the device
+    for the irqrouter operating in compatible mode.
+- reg-names : should specify relevant names to each reg property defined.
+
+Example:
+
+   qcom,irqrouter@0xfda00000 {
+       cell-index = <0>;
+       compatible = "qcom,irqrouter";
+       reg = <0xfda00000 0x100>;
+       reg-names = "irqrouter";
+   };
diff --git a/Documentation/devicetree/bindings/media/video/msm-ispif.txt b/Documentation/devicetree/bindings/media/video/msm-ispif.txt
new file mode 100644
index 0000000..ff33b17
--- /dev/null
+++ b/Documentation/devicetree/bindings/media/video/msm-ispif.txt
@@ -0,0 +1,23 @@
+* Qualcomm MSM ISPIF
+
+Required properties:
+- cell-index: ispif hardware core index
+- compatible :
+    - "qcom,ispif"
+- reg : offset and length of the register set for the device
+    for the ispif operating in compatible mode.
+- reg-names : should specify relevant names to each reg property defined.
+- interrupts : should contain the ispif interrupt.
+- interrupt-names : should specify relevant names to each interrupts
+  property defined.
+
+Example:
+
+   qcom,ispif@0xfda0a000 {
+       cell-index = <0>;
+       compatible = "qcom,ispif";
+       reg = <0xfda0a000 0x300>;
+       reg-names = "ispif";
+       interrupts = <0 55 0>;
+       interrupt-names = "ispif";
+   };
diff --git a/Documentation/devicetree/bindings/media/video/msm-jpeg.txt b/Documentation/devicetree/bindings/media/video/msm-jpeg.txt
new file mode 100644
index 0000000..41e0b3f
--- /dev/null
+++ b/Documentation/devicetree/bindings/media/video/msm-jpeg.txt
@@ -0,0 +1,25 @@
+* Qualcomm MSM JPEG
+
+Required properties:
+- cell-index: jpeg hardware core index
+- compatible :
+    - "qcom,jpeg"
+- reg : offset and length of the register set for the device
+    for the jpeg operating in compatible mode.
+- reg-names : should specify relevant names to each reg property defined.
+- interrupts : should contain the jpeg interrupt.
+- interrupt-names : should specify relevant names to each interrupts
+  property defined.
+- vdd-supply: phandle to GDSC regulator controlling JPEG core.
+
+Example:
+
+   qcom,jpeg@0xfda20000 {
+       cell-index = <0>;
+       compatible = "qcom,jpeg";
+       reg = <0xfda20000 0x400>;
+       reg-names = "jpeg";
+       interrupts = <0 60 0>;
+       interrupt-names = "jpeg";
+       vdd-supply = <&gdsc_jpeg>;
+   };
diff --git a/Documentation/devicetree/bindings/media/video/msm-vfe.txt b/Documentation/devicetree/bindings/media/video/msm-vfe.txt
new file mode 100644
index 0000000..7a70cac
--- /dev/null
+++ b/Documentation/devicetree/bindings/media/video/msm-vfe.txt
@@ -0,0 +1,25 @@
+* Qualcomm MSM VFE
+
+Required properties:
+- cell-index: vfe hardware core index
+- compatible :
+    - "qcom,vfe"
+- reg : offset and length of the register set for the device
+    for the vfe operating in compatible mode.
+- reg-names : should specify relevant names to each reg property defined.
+- interrupts : should contain the vfe interrupt.
+- interrupt-names : should specify relevant names to each interrupts
+  property defined.
+- vdd-supply: phandle to GDSC regulator controlling VFE core.
+
+Example:
+
+   qcom,vfe@0xfda10000 {
+       cell-index = <0>;
+       compatible = "qcom,vfe";
+       reg = <0xfda10000 0x1000>;
+       reg-names = "vfe";
+       interrupts = <0 58 0>;
+       interrupt-names = "vfe";
+       vdd-supply = <&gdsc_vfe>;
+   };
diff --git a/Documentation/devicetree/bindings/ocmem/msm-ocmem.txt b/Documentation/devicetree/bindings/ocmem/msm-ocmem.txt
index 1549f10..e212aca 100644
--- a/Documentation/devicetree/bindings/ocmem/msm-ocmem.txt
+++ b/Documentation/devicetree/bindings/ocmem/msm-ocmem.txt
@@ -18,6 +18,7 @@
 		    "ocmem_irq" corresponds to OCMEM Error Interrupt.
 		    "dm_irq" corresponds to DM Interrupt.
 - qcom,ocmem-num-regions: The number of OCMEM hardware memory regions.
+- qcom,resource-type: The hardware resource type of the OCMEM core.
 
 In addition to the information on the OCMEM core, the
 device tree contains additional information describing partitions
@@ -51,6 +52,7 @@
 		interrupts = <0 76 0 0 77 0>;
 		interrupt-names = "ocmem_irq", "dm_irq";
 		qcom,ocmem-num-regions = <0x3>;
+		qcom,resource-type = <0x706d636f>;
 		#address-cells = <1>;
 		#size-cells = <1>;
 		ranges = <0x0 0xfec00000 0x180000>;
diff --git a/Documentation/devicetree/bindings/pwm/qpnp-pwm.txt b/Documentation/devicetree/bindings/pwm/qpnp-pwm.txt
new file mode 100644
index 0000000..83ce3f8
--- /dev/null
+++ b/Documentation/devicetree/bindings/pwm/qpnp-pwm.txt
@@ -0,0 +1,160 @@
+Qualcomm QPNP PWM/LPG controller
+
+The qpnp-pwm driver supports Pulse Width Modulation (PWM) functionality. The
+PWM feature is used in a range of applications such as varying display
+brightness, LED dimming, etc. The Qualcomm PMICs have a physical device called
+the Light Pulse Generator (LPG). In addition to supporting PWM functionality,
+the LPG module provides a rich set of user-defined PWM pattern configurations,
+such as sawtooth, linear up, linear down and triangular patterns. The PWM
+patterns are used in applications such as the charger driver, where the driver
+uses these patterns to indicate various states of charging.
+
+Required device bindings:
+- compatible:		should be "qcom,qpnp-pwm"
+- reg:			Offset and length of the controller's LPG channel register
+			and of the LPG look-up table (LUT). The LPG look-up table is a
+			contiguous address space that is populated with PWM values.
+			Each PWM value is 9 bits wide while each table entry is
+			8 bits wide, so two entries are used to store each PWM
+			value: the lower entry holds the PWM LSB byte and the
+			higher entry holds the PWM MSB bit (see the illustration
+			after this list).
+- reg-names:		Names for the above registers.
+			"qpnp-lpg-channel-base" = physical base address of the
+			controller's LPG channel register.
+			"qpnp-lpg-lut-base" = physical base address of LPG LUT.
+- qcom,channel-id:	channel ID for the PWM.
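+
+As an illustration of the two-entries-per-value LUT layout described for the
+reg property above, a 9-bit PWM value could be split across two 8-bit table
+entries as in the following sketch (illustration only, not taken from the
+qpnp-pwm driver sources; pwm_value and lut are hypothetical names):
+
+	u16 pwm_value = 0x1A5;			/* 9-bit PWM value, 0..511 */
+	u8  lut[2];
+
+	lut[0] = pwm_value & 0xFF;		/* lower entry: PWM LSB byte */
+	lut[1] = (pwm_value >> 8) & 0x01;	/* higher entry: PWM MSB bit */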
+
+Optional device bindings:
+- qcom,channel-owner:	A string value to supply owner information.
+- qcom,mode-select:	0 = PWM mode
+			1 = LPG mode
+If this binding is specified along with the required bindings of PWM/LPG, then
+in addition to configuring PWM/LPG, the qpnp-pwm driver also enables the feature
+at probe time. If the binding is not specified, the qpnp-pwm driver does not
+enable the feature. Also, it is considered an error to specify a particular
+mode using this binding but not the respective feature subnode.
+
+All PWM devices support both PWM and LPG features within the same device.
+To support each feature, there are some required and optional bindings passed
+through device tree.
+
+The PWM device can enable one feature (either PWM or LPG) at any given time.
+Therefore, the qpnp-pwm driver applies the last PWM or LPG feature configuration
+and enables that feature.
+
+Required bindings to support PWM feature:
+- qcom,period:	PWM period time in microseconds.
+- qcom,duty:	PWM duty time in microseconds.
+- label:	"pwm"
+
+Required bindings to support LPG feature:
+The following bindings are needed to configure LPG mode, where a list of
+duty cycle percentages is populated. The size of the list cannot exceed
+the size of the LPG look-up table.
+
+- qcom,period:			PWM period time in microseconds.
+- qcom,duty-percents:		List of entries for look-up table
+- cell-index:			Index in the look-up table at which the driver starts
+				filling in the duty-pct list. This start index plus the
+				size of the list cannot exceed the size of the look-up table.
+- label:			"lpg"
+
+
+Optional bindings to support LPG feature:
+- qcom,ramp-step-duration:	Time (in ms) to wait before loading next entry of LUT
+- qcom,lpg-lut-pause-hi:	Time (in ms) to wait once the pattern reaches the hi
+				index.
+- qcom,lpg-lut-pause-lo:	Time (in ms) to wait once the pattern reaches the lo
+				index.
+- qcom,lpg-lut-ramp-direction:	1 = Start the pattern from lo index to hi index.
+				0 = Start the pattern from hi index to lo index.
+- qcom,lpg-lut-pattern-repeat:	1 = Repeat the pattern after the pause once it
+				reaches the last duty cycle.
+				0 = Do not repeat the pattern.
+- qcom,lpg-lut-ramp-toggle:	1 = Toggle the direction of the pattern.
+				0 = Do not toggle the direction.
+- qcom,lpg-lut-enable-pause-hi:	1 = Enable pause time at hi index.
+				0 = Disable pause time at hi index.
+- qcom,lpg-lut-enable-pause-lo:	1 = Enable pause time at lo index.
+				0 = Disable pause time at lo index.
+
+
+Example:
+        qcom,spmi@fc4c0000 {
+                #address-cells = <1>;
+                #size-cells = <0>;
+
+                qcom,pm8941@1 {
+                        spmi-slave-container;
+                        reg = <0x1>;
+                        #address-cells = <1>;
+                        #size-cells = <1>;
+
+                        pwm@b100 {
+                                #address-cells = <1>;
+                                #size-cells = <1>;
+                                compatible = "qcom,qpnp-pwm";
+                                reg = <0xb100 0x100>,
+                                      <0xb040 0x80>;
+				reg-names = "qpnp-lpg-channel-base", "qpnp-lpg-lut-base";
+                                qcom,channel-id = <0>;
+				status = "okay";
+                        };
+
+                        pwm@b200 {
+                                #address-cells = <1>;
+                                #size-cells = <1>;
+                                compatible = "qcom,qpnp-pwm";
+                                reg = <0xb200 0x100>,
+                                      <0xb040 0x80>;
+				reg-names = "qpnp-lpg-channel-base", "qpnp-lpg-lut-base";
+                                qcom,channel-id = <1>;
+                                qcom,period = <6000000>;
+				status = "okay";
+				qcom,pwm {
+					qcom,duty = <4000000>;
+					label = "pwm";
+				};
+                        };
+
+                        pwm@b500 {
+                                #address-cells = <1>;
+                                #size-cells = <1>;
+                                compatible = "qcom,qpnp-pwm";
+                                reg = <0xb500 0x100>,
+                                      <0xb040 0x80>;
+				reg-names = "qpnp-lpg-channel-base", "qpnp-lpg-lut-base";
+                                qcom,channel-id = <4>;
+                                qcom,period = <6000000>;
+				qcom,mode-select = <0>;
+				qcom,channel-owner = "RGB-led";
+				status = "okay";
+
+				qcom,pwm {
+					qcom,duty = <4000000>;
+					label = "pwm";
+				};
+
+				qcom,lpg {
+					qcom,duty-percents = <1 14 28 42 56 84 100
+							100 84 56 42 28 14 1>;
+					cell-index = <0>;
+					qcom,ramp-step-duration = <20>;
+					label = "lpg";
+				};
+                        };
+                };
+        };
+
+There are a couple of ways to configure PWM device channels, as shown in the
+above example:
+1. PWM device channel #0 is configured with only the required device bindings.
+In this case, the qpnp-pwm driver does not configure any mode by default.
+
+2. The qpnp-pwm driver configures PWM device channel #1 with PWM feature
+configuration, but does not enable the channel since "qcom,mode-select" binding
+is not specified in the devicetree.
+
+3. Both the PWM and LPG configurations are provided for PWM device channel #4.
+The qpnp-pwm driver configures both modes, but enables PWM mode at probe
+time. It also sets the channel owner information for the channel.
diff --git a/Documentation/devicetree/bindings/slimbus/slim-msm-ctrl.txt b/Documentation/devicetree/bindings/slimbus/slim-msm-ctrl.txt
index cf727d9..ecac09d 100644
--- a/Documentation/devicetree/bindings/slimbus/slim-msm-ctrl.txt
+++ b/Documentation/devicetree/bindings/slimbus/slim-msm-ctrl.txt
@@ -25,6 +25,9 @@
  - qcom,max-clk-gear: Maximum clock gear at which this controller can be run
 		 (range: 1-10)
 		 Default value will be 10 if this entry is not specified
+ - qcom,rxreg-access: This boolean indicates that slimbus RX should use direct
+		 register access to receive data. This flag is only needed if
+		 BAM pipe is not available to receive data from the slimbus.
 Example:
 	slim@fe12f000 {
 		cell-index = <1>;
@@ -35,4 +38,5 @@
 		interrupts = <0 163 0 0 164 0>;
 		interrupt-names = "slimbus_irq", "slimbus_bam_irq";
 		qcom,min-clk-gear = <10>;
+		qcom,rxreg-access;
 	};
diff --git a/Documentation/devicetree/bindings/usb/msm-hsusb.txt b/Documentation/devicetree/bindings/usb/msm-hsusb.txt
index 95ddf34..0516dff 100644
--- a/Documentation/devicetree/bindings/usb/msm-hsusb.txt
+++ b/Documentation/devicetree/bindings/usb/msm-hsusb.txt
@@ -49,3 +49,70 @@
 		qcom,hsusb-otg-pclk-src-name = "dfab_usb_clk";
 		qcom,hsusb-otg-pmic-id-irq = <47>
 	};
+
+BAM:
+
+Required properties:
+- compatible: should be "qcom,usb-bam-msm"
+- reg: offset and length of the register set in the memory map
+- interrupts: IRQ line
+- qcom,usb-active-bam: active BAM type. Can be one of
+            0 - HSUSB_BAM
+            1 - HSIC_BAM
+- qcom,usb-total-bam-num: total number of BAMs that are supported
+- qcom,usb-bam-num-pipes: max number of pipes that can be used
+- qcom,usb-base-address: physical base address of the BAM
+
+A number of USB BAM pipe parameters are represented as sub-nodes:
+
+Required subnode properties:
+- label: a string describing the pipe's direction and use
+- qcom,usb-bam-type: BAM type. Can be one of
+            0 - HSUSB_BAM
+            1 - HSIC_BAM
+- qcom,src-bam-physical-address: source BAM physical address
+- qcom,src-bam-pipe-index: source BAM pipe index
+- qcom,dst-bam-physical-address: destination BAM physical address
+- qcom,dst-bam-pipe-index: destination BAM pipe index
+- qcom,data-fifo-offset: data fifo offset address
+- qcom,data-fifo-size: data fifo size
+- qcom,descriptor-fifo-offset: descriptor fifo offset address
+- qcom,descriptor-fifo-size: descriptor fifo size
+
+Example USB BAM controller device node:
+
+	qcom,usbbam@f9304000 {
+		compatible = "qcom,usb-bam-msm";
+		reg = <0xf9304000 0x9000>;
+		interrupts = <0 132 0>;
+		qcom,usb-active-bam = <0>;
+		qcom,usb-total-bam-num = <1>;
+		qcom,usb-bam-num-pipes = <16>;
+		qcom,usb-base-address = <0xf9200000>;
+
+		qcom,pipe1 {
+			label = "usb-to-peri-qdss-dwc3";
+			qcom,usb-bam-type = <0>;
+			qcom,src-bam-physical-address = <0>;
+			qcom,src-bam-pipe-index = <0>;
+			qcom,dst-bam-physical-address = <0>;
+			qcom,dst-bam-pipe-index = <0>;
+			qcom,data-fifo-offset = <0>;
+			qcom,data-fifo-size = <0>;
+			qcom,descriptor-fifo-offset = <0>;
+			qcom,descriptor-fifo-size = <0>;
+		};
+
+		qcom,pipe2 {
+			label = "peri-to-usb-qdss-dwc3";
+			qcom,usb-bam-type = <0>;
+			qcom,src-bam-physical-address = <0xfc37C000>;
+			qcom,src-bam-pipe-index = <0>;
+			qcom,dst-bam-physical-address = <0xf9304000>;
+			qcom,dst-bam-pipe-index = <2>;
+			qcom,data-fifo-offset = <0xf0000>;
+			qcom,data-fifo-size = <0x4000>;
+			qcom,descriptor-fifo-offset = <0xf4000>;
+			qcom,descriptor-fifo-size = <0x1400>;
+		};
+	};
diff --git a/Documentation/usb/misc_ksbridge.txt b/Documentation/usb/misc_ksbridge.txt
new file mode 100644
index 0000000..f409dc1
--- /dev/null
+++ b/Documentation/usb/misc_ksbridge.txt
@@ -0,0 +1,46 @@
+Introduction
+--------------
+ksbridge is a simple misc device which bridges the Kickstart application
+to the HSIC hardware. The driver supports two instances: one instance for
+flash-less-boot/ram-dumps and the other for EFS sync.
+
+Initialization
+--------------
+The driver creates two bridge instances and registers for USB devices 0x9008
+and 0x9048/0x904C. The misc device name depends on the USB PID.
+For PID 9008 the misc device name is ks_bridge, and for PID 9048/904C the
+misc device name is efs_bridge. After Kickstart opens the misc device, IN
+URBs are submitted to the hardware; by default 20 IN URBs are configured.
+
+TX PATH
+-------
+The transmit path is simple. The bridge driver exposes a write system
+call to Kickstart. Data from the write call is put into a list and a
+work item is scheduled to take the data from the list and write it to
+HSIC (a sketch of this pattern is shown at the end of this section).
+
+Functions:
+ksb_fs_write: System call invoked when kickstart writes the data
+ksb_tomdm_work: Work function which submits data to HSIC h/w.
+
+Data Structures:
+to_mdm_list: Data is stored in this list
+to_mdm_work: mapped to ksb_tomdm_work function
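+
+A minimal sketch of this list-plus-workqueue pattern is shown below. It is an
+illustration only and is not the actual ksbridge code; the names ksb_pkt,
+queue_to_mdm and tomdm_work_fn are hypothetical:
+
+	#include <linux/errno.h>
+	#include <linux/list.h>
+	#include <linux/slab.h>
+	#include <linux/spinlock.h>
+	#include <linux/string.h>
+	#include <linux/types.h>
+	#include <linux/workqueue.h>
+
+	struct ksb_pkt {			/* one queued write */
+		struct list_head list;
+		size_t len;
+		u8 buf[];
+	};
+
+	static LIST_HEAD(to_mdm_list);
+	static DEFINE_SPINLOCK(to_mdm_lock);
+	static void tomdm_work_fn(struct work_struct *w);
+	static DECLARE_WORK(to_mdm_work, tomdm_work_fn);
+
+	/* write path: copy the data, queue it and defer the HSIC submission */
+	static int queue_to_mdm(const u8 *data, size_t len)
+	{
+		struct ksb_pkt *pkt = kmalloc(sizeof(*pkt) + len, GFP_KERNEL);
+
+		if (!pkt)
+			return -ENOMEM;
+		memcpy(pkt->buf, data, len);
+		pkt->len = len;
+
+		spin_lock(&to_mdm_lock);
+		list_add_tail(&pkt->list, &to_mdm_list);
+		spin_unlock(&to_mdm_lock);
+
+		schedule_work(&to_mdm_work);
+		return 0;
+	}
+
+	/* work function: drain the list and hand each packet to the HSIC h/w */
+	static void tomdm_work_fn(struct work_struct *w)
+	{
+		struct ksb_pkt *pkt;
+
+		spin_lock(&to_mdm_lock);
+		while (!list_empty(&to_mdm_list)) {
+			pkt = list_first_entry(&to_mdm_list, struct ksb_pkt, list);
+			list_del(&pkt->list);
+			spin_unlock(&to_mdm_lock);
+			/* submit pkt->buf / pkt->len as a bulk OUT URB here */
+			kfree(pkt);
+			spin_lock(&to_mdm_lock);
+		}
+		spin_unlock(&to_mdm_lock);
+	}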
+
+RX PATH
+-------
+During initialization, 20 IN URBs are submitted to the HSIC controller. In
+the completion handler of each URB, the buffer is de-queued and added to a
+list, and the read function is woken up. A new buffer is then created and
+submitted to the controller (a sketch of this pattern follows at the end of
+this section).
+
+Functions:
+ksb_fs_read: system call invoked by ks when it tries to read the data
+ksb_rx_cb: rx urb completion handler
+ksb_start_rx_work: function called during initialization.
+
+Data Structures:
+ks_wait_q: the read system call blocks on this queue until data is
+available or the device is disconnected.
+to_ks_list: data is queued to this list by the RX URB completion handler and
+later de-queued by the read system call.
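+
+A corresponding sketch of the RX completion pattern follows. Again this is an
+illustration only with hypothetical names, reusing struct ksb_pkt and the
+includes from the TX sketch above; the actual driver entry points are
+ksb_rx_cb and ksb_fs_read:
+
+	#include <linux/usb.h>
+	#include <linux/wait.h>
+
+	static LIST_HEAD(to_ks_list);
+	static DEFINE_SPINLOCK(to_ks_lock);
+	static DECLARE_WAIT_QUEUE_HEAD(ks_wait_q);
+
+	/* IN URB completion: queue the received data and wake up read() */
+	static void rx_complete(struct urb *urb)
+	{
+		struct ksb_pkt *pkt = urb->context;	/* set at submit time */
+		unsigned long flags;
+
+		if (urb->status) {
+			kfree(pkt);
+			return;
+		}
+		pkt->len = urb->actual_length;
+
+		spin_lock_irqsave(&to_ks_lock, flags);
+		list_add_tail(&pkt->list, &to_ks_list);
+		spin_unlock_irqrestore(&to_ks_lock, flags);
+
+		wake_up(&ks_wait_q);	/* unblock a pending read() */
+		/* allocate a fresh buffer and resubmit an IN URB here */
+	}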
diff --git a/arch/arm/boot/dts/msm-pm8941.dtsi b/arch/arm/boot/dts/msm-pm8941.dtsi
index 6737e89..51ec10c 100644
--- a/arch/arm/boot/dts/msm-pm8941.dtsi
+++ b/arch/arm/boot/dts/msm-pm8941.dtsi
@@ -332,6 +332,25 @@
 				interrupts = <0x0 0x61 0x1>;
 			};
 		};
+
+		vadc@3100 {
+			compatible = "qcom,qpnp-vadc";
+			reg = <0x3100 0x100>;
+			interrupts = <0x0 0x31 0x0>;
+			qcom,adc-bit-resolution = <15>;
+			qcom,adc-vdd-reference = <1800>;
+
+			chan@0 {
+				label = "usb_in";
+				qcom,channel-num = <0>;
+				qcom,decimation = <0>;
+				qcom,pre-div-channel-scaling = <20>;
+				qcom,calibration-type = "absolute";
+				qcom,scale-function = <0>;
+				qcom,hw-settle-time = <0>;
+				qcom,fast-avg-setup = <0>;
+			};
+		};
 	};
 
 	qcom,pm8941@1 {
diff --git a/arch/arm/boot/dts/msm8974-camera.dtsi b/arch/arm/boot/dts/msm8974-camera.dtsi
new file mode 100644
index 0000000..0375e93
--- /dev/null
+++ b/arch/arm/boot/dts/msm8974-camera.dtsi
@@ -0,0 +1,151 @@
+/*
+ * Copyright (c) 2012, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+/include/ "skeleton.dtsi"
+
+/ {
+	qcom,cam_server {
+		compatible = "qcom,cam_server";
+		reg = <0xfd8C0000 0x10000>;
+		reg-names = "server";
+	};
+	qcom,csiphy@fda0ac00 {
+		cell-index = <0>;
+		compatible = "qcom,csiphy";
+		reg = <0xfda0ac00 0x200>;
+		reg-names = "csiphy";
+		interrupts = <0 78 0>;
+		interrupt-names = "csiphy";
+	};
+	qcom,csiphy@fda0b000 {
+		cell-index = <1>;
+		compatible = "qcom,csiphy";
+		reg = <0xfda0b000 0x200>;
+		reg-names = "csiphy";
+		interrupts = <0 79 0>;
+		interrupt-names = "csiphy";
+	};
+	qcom,csiphy@fda0b400 {
+		cell-index = <2>;
+		compatible = "qcom,csiphy";
+		reg = <0xfda0b400 0x200>;
+		reg-names = "csiphy";
+		interrupts = <0 80 0>;
+		interrupt-names = "csiphy";
+	};
+	qcom,csid@fda08000  {
+		cell-index = <0>;
+		compatible = "qcom,csid";
+		reg = <0xfda08000 0x100>;
+		reg-names = "csid";
+		interrupts = <0 51 0>;
+		interrupt-names = "csid";
+	};
+	qcom,csid@fda08400 {
+		cell-index = <1>;
+		compatible = "qcom,csid";
+		reg = <0xfda08400 0x100>;
+		reg-names = "csid";
+		interrupts = <0 52 0>;
+		interrupt-names = "csid";
+	};
+	qcom,csid@fda08800 {
+		cell-index = <2>;
+		compatible = "qcom,csid";
+		reg = <0xfda08800 0x100>;
+		reg-names = "csid";
+		interrupts = <0 53 0>;
+		interrupt-names = "csid";
+	};
+	qcom,csid@fda08C00 {
+		cell-index = <3>;
+		compatible = "qcom,csid";
+		reg = <0xfda08C00 0x100>;
+		reg-names = "csid";
+		interrupts = <0 54 0>;
+		interrupt-names = "csid";
+	};
+	qcom,ispif@fda0A000 {
+		cell-index = <0>;
+		compatible = "qcom,ispif";
+		reg = <0xfda0A000 0x300>;
+		reg-names = "ispif";
+		interrupts = <0 55 0>;
+		interrupt-names = "ispif";
+	};
+	qcom,cci@fda0C000 {
+		cell-index = <0>;
+		compatible = "qcom,cci";
+		reg = <0xfda0C000 0x1000>;
+		reg-names = "cci";
+		interrupts = <0 50 0>;
+		interrupt-names = "cci";
+	};
+	qcom,vfe@fda10000 {
+		cell-index = <0>;
+		compatible = "qcom,vfe40";
+		reg = <0xfda10000 0x1000>;
+		reg-names = "vfe";
+		interrupts = <0 57 0>;
+		interrupt-names = "vfe";
+		vdd-supply = <&gdsc_vfe>;
+	};
+	qcom,vfe@fda14000 {
+		cell-index = <1>;
+		compatible = "qcom,vfe40";
+		reg = <0xfda14000 0x1000>;
+		reg-names = "vfe";
+		interrupts = <0 58 0>;
+		interrupt-names = "vfe";
+		vdd-supply = <&gdsc_vfe>;
+	};
+	qcom,jpeg@fda1c000 {
+		cell-index = <0>;
+		compatible = "qcom,jpeg";
+		reg = <0xfda1c000 0x400>;
+		reg-names = "jpeg";
+		interrupts = <0 59 0>;
+		interrupt-names = "jpeg";
+	};
+	qcom,jpeg@fda20000 {
+		cell-index = <1>;
+		compatible = "qcom,jpeg";
+		reg = <0xfda20000 0x400>;
+		reg-names = "jpeg";
+		interrupts = <0 60 0>;
+		interrupt-names = "jpeg";
+	};
+	qcom,jpeg@fda24000 {
+		cell-index = <2>;
+		compatible = "qcom,jpeg";
+		reg = <0xfda24000 0x400>;
+		reg-names = "jpeg";
+		interrupts = <0 61 0>;
+		interrupt-names = "jpeg";
+	};
+	qcom,irqrouter@fda00000 {
+		cell-index = <0>;
+		compatible = "qcom,irqrouter";
+		reg = <0xfda00000 0x100>;
+		reg-names = "irqrouter";
+	};
+	qcom,cpp@fda04000 {
+		cell-index = <0>;
+		compatible = "qcom,cpp";
+		reg = <0xfda04000 0x100>;
+		reg-names = "cpp";
+		interrupts = <0 49 0>;
+		interrupt-names = "cpp";
+		vdd-supply = <&gdsc_vfe>;
+	};
+};
diff --git a/arch/arm/boot/dts/msm8974-gpu.dtsi b/arch/arm/boot/dts/msm8974-gpu.dtsi
new file mode 100644
index 0000000..a972d7f
--- /dev/null
+++ b/arch/arm/boot/dts/msm8974-gpu.dtsi
@@ -0,0 +1,126 @@
+/* Copyright (c) 2012, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+/ {
+	qcom,kgsl-3d0@fdb00000 {
+		label = "kgsl-3d0";
+		compatible = "qcom,kgsl-3d0", "qcom,kgsl-3d";
+		reg = <0xfdb00000 0x20000>;
+		reg-names = "kgsl_3d0_reg_memory";
+		interrupts = <0 33 0>;
+		interrupt-names = "kgsl_3d0_irq";
+		qcom,id = <0>;
+
+		qcom,chipid = <0x03030000>;
+
+		qcom,initial-pwrlevel = <1>;
+
+		qcom,idle-timeout = <83>; //<HZ/12>
+		qcom,nap-allowed = <1>;
+		qcom,clk-map = <0x00000016>; //KGSL_CLK_CORE | KGSL_CLK_IFACE | KGSL_CLK_MEM_IFACE
+
+		/* Bus Scale Settings */
+		qcom,grp3d-vectors = <0 0 0 0>, <2 1 0 0>,
+				<0 0 0 2000>, <2 1 0 3000>,
+				<0 0 0 4000>, <2 1 0 5000>,
+				<0 0 0 6400>, <2 1 0 7600>;
+		qcom,grp3d-num-vectors-per-usecase = <2>;
+		qcom,grp3d-num-bus-scale-usecases = <4>;
+
+		/* GDSC oxili regulators */
+		vddcx-supply = <&gdsc_oxili_cx>;
+		vdd-supply = <&gdsc_oxili_gx>;
+
+		/* Power levels */
+
+		/* IOMMU Data */
+		iommu = <&kgsl_iommu>;
+
+		qcom,gpu-pwrlevels {
+			#address-cells = <1>;
+			#size-cells = <0>;
+
+			compatible = "qcom,gpu-pwrlevels";
+
+			qcom,gpu-pwrlevel@0 {
+				reg = <0>;
+				qcom,gpu-freq = <500000000>;
+				qcom,bus-freq = <3>;
+				qcom,io-fraction = <0>;
+			};
+
+			qcom,gpu-pwrlevel@1 {
+				reg = <1>;
+				qcom,gpu-freq = <333000000>;
+				qcom,bus-freq = <2>;
+				qcom,io-fraction = <33>;
+			};
+
+			qcom,gpu-pwrlevel@2 {
+				reg = <2>;
+				qcom,gpu-freq = <200000000>;
+				qcom,bus-freq = <1>;
+				qcom,io-fraction = <100>;
+			};
+
+			qcom,gpu-pwrlevel@3 {
+				reg = <3>;
+				qcom,gpu-freq = <27000000>;
+				qcom,bus-freq = <0>;
+				qcom,io-fraction = <0>;
+			};
+		};
+
+		qcom,dcvs-core-info {
+			#address-cells = <1>;
+			#size-cells = <0>;
+
+			compatible = "qcom,dcvs-core-info";
+
+			qcom,core-max-time-us = <100000>;
+			qcom,algo-slack-time-us = <39000>;
+			qcom,algo-disable-pc-threshold = <86000>;
+			qcom,algo-ss-window-size = <1000000>;
+			qcom,algo-ss-util-pct = <95>;
+			qcom,algo-em-max-util-pct = <97>;
+			qcom,algo-ss-iobusy-conv = <100>;
+
+			qcom,dcvs-freq@0 {
+				reg = <0>;
+				qcom,freq = <0>;
+				qcom,idle-energy = <0>;
+				qcom,active-energy = <333932>;
+			};
+
+			qcom,dcvs-freq@1 {
+				reg = <1>;
+				qcom,freq = <0>;
+				qcom,idle-energy = <0>;
+				qcom,active-energy = <497532>;
+			};
+
+			qcom,dcvs-freq@2 {
+				reg = <2>;
+				qcom,freq = <0>;
+				qcom,idle-energy = <0>;
+				qcom,active-energy = <707610>;
+			};
+
+			qcom,dcvs-freq@3 {
+				reg = <3>;
+				qcom,freq = <0>;
+				qcom,idle-energy = <0>;
+				qcom,active-energy = <844545>;
+			};
+		};
+
+	};
+};
diff --git a/arch/arm/boot/dts/msm8974-ion.dtsi b/arch/arm/boot/dts/msm8974-ion.dtsi
new file mode 100644
index 0000000..1893ae4
--- /dev/null
+++ b/arch/arm/boot/dts/msm8974-ion.dtsi
@@ -0,0 +1,76 @@
+/* Copyright (c) 2012, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+/ {
+	qcom,ion {
+		compatible = "qcom,msm-ion";
+		#address-cells = <1>;
+		#size-cells = <0>;
+
+		qcom,ion-heap@30 { /* SYSTEM HEAP */
+			reg = <30>;
+		};
+
+		qcom,ion-heap@8 { /* CP_MM HEAP */
+			compatible = "qcom,msm-ion-reserve";
+			reg = <8>;
+			qcom,heap-align = <0x1000>;
+			qcom,memory-reservation-type = "EBI1"; /* reserve EBI memory */
+			qcom,memory-reservation-size = <0x7800000>;
+		};
+
+		qcom,ion-heap@29 { /* FIRMWARE HEAP */
+			compatible = "qcom,msm-ion-reserve";
+			reg = <29>;
+			qcom,heap-align = <0x20000>;
+			qcom,heap-adjacent = <8>;
+			qcom,memory-reservation-type = "EBI1"; /* reserve EBI memory */
+			qcom,memory-reservation-size = <0xA00000>;
+		};
+
+		qcom,ion-heap@12 { /* MFC HEAP */
+			compatible = "qcom,msm-ion-reserve";
+			reg = <12>;
+			qcom,heap-align = <0x1000>;
+			qcom,memory-reservation-type = "EBI1"; /* reserve EBI memory */
+			qcom,memory-reservation-size = <0x2000>;
+		};
+
+		qcom,ion-heap@24 { /* SF HEAP */
+			compatible = "qcom,msm-ion-reserve";
+			reg = <24>;
+			qcom,heap-align = <0x1000>;
+			qcom,memory-reservation-type = "EBI1"; /* reserve EBI memory */
+			qcom,memory-reservation-size = <0x2800000>;
+		};
+
+		qcom,ion-heap@25 { /* IOMMU HEAP */
+			reg = <25>;
+		};
+
+		qcom,ion-heap@27 { /* QSECOM HEAP */
+			compatible = "qcom,msm-ion-reserve";
+			reg = <27>;
+			qcom,heap-align = <0x1000>;
+			qcom,memory-reservation-type = "EBI1"; /* reserve EBI memory */
+			qcom,memory-reservation-size = <0x600000>;
+		};
+
+		qcom,ion-heap@28 { /* AUDIO HEAP */
+			compatible = "qcom,msm-ion-reserve";
+			reg = <28>;
+			qcom,heap-align = <0x1000>;
+			qcom,memory-reservation-type = "EBI1"; /* reserve EBI memory */
+			qcom,memory-reservation-size = <0x2B4000>;
+		};
+	};
+};
diff --git a/arch/arm/boot/dts/msm8974-regulator.dtsi b/arch/arm/boot/dts/msm8974-regulator.dtsi
index 91894de..a187223 100644
--- a/arch/arm/boot/dts/msm8974-regulator.dtsi
+++ b/arch/arm/boot/dts/msm8974-regulator.dtsi
@@ -127,9 +127,9 @@
 	rpm-regulator-smpb3 {
 		status = "okay";
 		pm8841_s3: regulator-s3 {
-			regulator-min-microvolt = <1150000>;
-			regulator-max-microvolt = <1150000>;
-			qcom,init-voltage = <1150000>;
+			regulator-min-microvolt = <1050000>;
+			regulator-max-microvolt = <1050000>;
+			qcom,init-voltage = <1050000>;
 			status = "okay";
 		};
 	};
@@ -218,9 +218,9 @@
 		status = "okay";
 		pm8941_l3: regulator-l3 {
 			parent-supply = <&pm8941_s1>;
-			regulator-min-microvolt = <1200000>;
-			regulator-max-microvolt = <1200000>;
-			qcom,init-voltage = <1200000>;
+			regulator-min-microvolt = <1225000>;
+			regulator-max-microvolt = <1225000>;
+			qcom,init-voltage = <1225000>;
 			status = "okay";
 		};
 	};
@@ -229,9 +229,9 @@
 		status = "okay";
 		pm8941_l4: regulator-l4 {
 			parent-supply = <&pm8941_s1>;
-			regulator-min-microvolt = <1150000>;
-			regulator-max-microvolt = <1150000>;
-			qcom,init-voltage = <1150000>;
+			regulator-min-microvolt = <1225000>;
+			regulator-max-microvolt = <1225000>;
+			qcom,init-voltage = <1225000>;
 			status = "okay";
 		};
 	};
@@ -303,9 +303,9 @@
 		status = "okay";
 		pm8941_l11: regulator-l11 {
 			parent-supply = <&pm8941_s1>;
-			regulator-min-microvolt = <1250000>;
-			regulator-max-microvolt = <1250000>;
-			qcom,init-voltage = <1250000>;
+			regulator-min-microvolt = <1300000>;
+			regulator-max-microvolt = <1300000>;
+			qcom,init-voltage = <1300000>;
 			status = "okay";
 		};
 	};
diff --git a/arch/arm/boot/dts/msm8974.dtsi b/arch/arm/boot/dts/msm8974.dtsi
index edb57dd..f144421 100644
--- a/arch/arm/boot/dts/msm8974.dtsi
+++ b/arch/arm/boot/dts/msm8974.dtsi
@@ -13,7 +13,10 @@
 /include/ "skeleton.dtsi"
 /include/ "msm8974_pm.dtsi"
 /include/ "msm8974-iommu.dtsi"
+/include/ "msm8974-camera.dtsi"
 /include/ "msm-gdsc.dtsi"
+/include/ "msm8974-ion.dtsi"
+/include/ "msm8974-gpu.dtsi"
 
 / {
 	model = "Qualcomm MSM 8974";
@@ -85,7 +88,9 @@
 		cell-index = <1>; /* SDC1 eMMC slot */
 		compatible = "qcom,msm-sdcc";
 		reg = <0xf9824000 0x1000>;
+		reg-names = "core_mem";
 		interrupts = <0 123 0>;
+		interrupt-names = "core_irq";
 		vdd-supply = <&pm8941_l20>;
 		vdd-io-supply = <&pm8941_s3>;
 
@@ -112,7 +117,9 @@
 		cell-index = <2>; /* SDC2 SD card slot */
 		compatible = "qcom,msm-sdcc";
 		reg = <0xf98a4000 0x1000>;
+		reg-names = "core_mem";
 		interrupts = <0 125 0>;
+		interrupt-names = "core_irq";
 		vdd-supply = <&pm8941_l21>;
 		vdd-io-supply = <&pm8941_l13>;
 
@@ -141,7 +148,9 @@
 		cell-index = <3>; /* SDC3 SDIO slot */
 		compatible = "qcom,msm-sdcc";
 		reg = <0xf9864000 0x1000>;
+		reg-names = "core_mem";
 		interrupts = <0 127 0>;
+		interrupt-names = "core_irq";
 
 		gpios = <&msmgpio 40 0>, /* CLK */
 			<&msmgpio 39 0>, /* CMD */
@@ -162,7 +171,9 @@
 		cell-index = <4>; /* SDC4 SDIO slot */
 		compatible = "qcom,msm-sdcc";
 		reg = <0xf98e4000 0x1000>;
+		reg-names = "core_mem";
 		interrupts = <0 129 0>;
+		interrupt-names = "core_irq";
 
 		gpios = <&msmgpio 93 0>, /* CLK */
 			<&msmgpio 91 0>, /* CMD */
@@ -205,6 +216,7 @@
 		interrupts = <0 163 0 0 164 0>;
 		interrupt-names = "slimbus_irq", "slimbus_bam_irq";
 		qcom,min-clk-gear = <10>;
+		qcom,rxreg-access;
 	};
 
 	spmi_bus: qcom,spmi@fc4c0000 {
@@ -591,8 +603,8 @@
 		reg-names = "tsens_physical", "tsens_eeprom_physical";
 		interrupts = <0 184 0>;
 		qcom,sensors = <11>;
-		qcom,slope = <1134 1122 1142 1123 1176 1176 1176 1186 1176
-				1176 1176>;
+		qcom,slope = <3200 3200 3200 3200 3200 3200 3200 3200 3200
+				3200 3200>;
 	};
 
 	qcom,msm-rtb {
diff --git a/arch/arm/configs/msm7627a_defconfig b/arch/arm/configs/msm7627a_defconfig
index 314f91b..00325c9 100644
--- a/arch/arm/configs/msm7627a_defconfig
+++ b/arch/arm/configs/msm7627a_defconfig
@@ -218,7 +218,6 @@
 # CONFIG_SERIAL_MSM_CLOCK_CONTROL is not set
 CONFIG_DIAG_CHAR=y
 # CONFIG_HW_RANDOM is not set
-CONFIG_DCC_TTY=y
 CONFIG_I2C=y
 CONFIG_I2C_CHARDEV=y
 # CONFIG_I2C_MSM is not set
diff --git a/arch/arm/configs/msm7630_defconfig b/arch/arm/configs/msm7630_defconfig
index aad13b8..5c5a152 100644
--- a/arch/arm/configs/msm7630_defconfig
+++ b/arch/arm/configs/msm7630_defconfig
@@ -238,7 +238,6 @@
 CONFIG_SERIAL_MSM_HS=y
 CONFIG_DIAG_CHAR=y
 # CONFIG_HW_RANDOM is not set
-CONFIG_DCC_TTY=y
 CONFIG_I2C=y
 CONFIG_I2C_CHARDEV=y
 CONFIG_I2C_QUP=y
diff --git a/arch/arm/configs/msm8660-perf_defconfig b/arch/arm/configs/msm8660-perf_defconfig
index f3b2219..a51b76d 100644
--- a/arch/arm/configs/msm8660-perf_defconfig
+++ b/arch/arm/configs/msm8660-perf_defconfig
@@ -417,8 +417,6 @@
 CONFIG_ANDROID_TIMED_GPIO=y
 CONFIG_ANDROID_LOW_MEMORY_KILLER=y
 CONFIG_MSM_SSBI=y
-CONFIG_MSM_IOMMU=y
-# CONFIG_IOMMU_PGTABLES_L2 is not set
 CONFIG_EXT2_FS=y
 CONFIG_EXT2_FS_XATTR=y
 CONFIG_EXT3_FS=y
diff --git a/arch/arm/configs/msm8660_defconfig b/arch/arm/configs/msm8660_defconfig
index 4748496..0efe658 100644
--- a/arch/arm/configs/msm8660_defconfig
+++ b/arch/arm/configs/msm8660_defconfig
@@ -282,7 +282,6 @@
 CONFIG_DIAG_CHAR=y
 CONFIG_HW_RANDOM=y
 CONFIG_HW_RANDOM_MSM=y
-CONFIG_DCC_TTY=y
 CONFIG_I2C=y
 CONFIG_I2C_CHARDEV=y
 # CONFIG_I2C_MSM is not set
@@ -419,8 +418,6 @@
 CONFIG_ANDROID_TIMED_GPIO=y
 CONFIG_ANDROID_LOW_MEMORY_KILLER=y
 CONFIG_MSM_SSBI=y
-CONFIG_MSM_IOMMU=y
-# CONFIG_IOMMU_PGTABLES_L2 is not set
 CONFIG_EXT2_FS=y
 CONFIG_EXT2_FS_XATTR=y
 CONFIG_EXT3_FS=y
diff --git a/arch/arm/configs/msm8960-perf_defconfig b/arch/arm/configs/msm8960-perf_defconfig
index 66e71fc..1f2e285 100644
--- a/arch/arm/configs/msm8960-perf_defconfig
+++ b/arch/arm/configs/msm8960-perf_defconfig
@@ -233,6 +233,9 @@
 CONFIG_BT_BNEP_PROTO_FILTER=y
 CONFIG_BT_HIDP=y
 CONFIG_BT_HCISMD=y
+CONFIG_BT_HCIUART=y
+CONFIG_BT_HCIUART_ATH3K=y
+CONFIG_MSM_BT_POWER=y
 CONFIG_CFG80211=m
 # CONFIG_CFG80211_WEXT is not set
 CONFIG_RFKILL=y
@@ -285,6 +288,8 @@
 CONFIG_INPUT_MISC=y
 CONFIG_INPUT_PMIC8XXX_PWRKEY=y
 CONFIG_INPUT_UINPUT=y
+CONFIG_STM_LIS3DH=y
+CONFIG_INPUT_MPU3050=y
 # CONFIG_LEGACY_PTYS is not set
 CONFIG_N_SMUX=y
 CONFIG_N_SMUX_LOOPBACK=y
diff --git a/arch/arm/configs/msm8960_defconfig b/arch/arm/configs/msm8960_defconfig
index 3731845..8c79847 100644
--- a/arch/arm/configs/msm8960_defconfig
+++ b/arch/arm/configs/msm8960_defconfig
@@ -237,6 +237,9 @@
 CONFIG_BT_BNEP_PROTO_FILTER=y
 CONFIG_BT_HIDP=y
 CONFIG_BT_HCISMD=y
+CONFIG_BT_HCIUART=y
+CONFIG_BT_HCIUART_ATH3K=y
+CONFIG_MSM_BT_POWER=y
 CONFIG_CFG80211=m
 # CONFIG_CFG80211_WEXT is not set
 CONFIG_RFKILL=y
@@ -289,6 +292,8 @@
 CONFIG_INPUT_MISC=y
 CONFIG_INPUT_PMIC8XXX_PWRKEY=y
 CONFIG_INPUT_UINPUT=y
+CONFIG_STM_LIS3DH=y
+CONFIG_INPUT_MPU3050=y
 # CONFIG_LEGACY_PTYS is not set
 CONFIG_N_SMUX=y
 CONFIG_N_SMUX_LOOPBACK=y
diff --git a/arch/arm/configs/msm8974_defconfig b/arch/arm/configs/msm8974_defconfig
index b9f26d9..6f4db42 100644
--- a/arch/arm/configs/msm8974_defconfig
+++ b/arch/arm/configs/msm8974_defconfig
@@ -51,6 +51,11 @@
 CONFIG_MSM_TZ_LOG=y
 CONFIG_MSM_DIRECT_SCLK_ACCESS=y
 CONFIG_MSM_OCMEM=y
+CONFIG_MSM_MEMORY_DUMP=y
+CONFIG_MSM_CACHE_ERP=y
+CONFIG_MSM_L1_ERR_PANIC=y
+CONFIG_MSM_L2_ERP_PRINT_ACCESS_ERRORS=y
+CONFIG_MSM_L2_ERP_2BIT_PANIC=y
 CONFIG_NO_HZ=y
 CONFIG_HIGH_RES_TIMERS=y
 CONFIG_SMP=y
@@ -146,6 +151,8 @@
 CONFIG_POWER_SUPPLY=y
 # CONFIG_BATTERY_MSM is not set
 # CONFIG_HWMON is not set
+CONFIG_THERMAL=y
+CONFIG_THERMAL_TSENS8974=y
 CONFIG_REGULATOR_STUB=y
 CONFIG_REGULATOR_QPNP=y
 CONFIG_MEDIA_SUPPORT=y
@@ -157,6 +164,7 @@
 # CONFIG_RADIO_ADAPTERS is not set
 CONFIG_ION=y
 CONFIG_ION_MSM=y
+CONFIG_MSM_KGSL=y
 CONFIG_FB=y
 CONFIG_FB_MSM=y
 # CONFIG_FB_MSM_BACKLIGHT is not set
@@ -236,8 +244,6 @@
 CONFIG_DEBUG_MEMORY_INIT=y
 CONFIG_DYNAMIC_DEBUG=y
 CONFIG_DEBUG_USER=y
-CONFIG_DEBUG_LL=y
-CONFIG_EARLY_PRINTK=y
 CONFIG_KEYS=y
 CONFIG_CRYPTO_MD4=y
 CONFIG_CRYPTO_SHA256=y
diff --git a/arch/arm/include/asm/pmu.h b/arch/arm/include/asm/pmu.h
index 37cbfcb..0d0103a 100644
--- a/arch/arm/include/asm/pmu.h
+++ b/arch/arm/include/asm/pmu.h
@@ -108,6 +108,12 @@
 	enum arm_pmu_type type;
 	cpumask_t	active_irqs;
 	const char	*name;
+	int		num_events;
+	atomic_t	active_events;
+	struct mutex	reserve_mutex;
+	u64		max_period;
+	struct platform_device	*plat_device;
+	u32		from_idle;
 	irqreturn_t	(*handle_irq)(int irq_num, void *dev);
 	int     	(*request_pmu_irq)(int irq, irq_handler_t *irq_h);
 	void    	(*free_pmu_irq)(int irq);
@@ -123,11 +129,6 @@
 	void		(*stop)(void);
 	void		(*reset)(void *);
 	int		(*map_event)(struct perf_event *event);
-	int		num_events;
-	atomic_t	active_events;
-	struct mutex	reserve_mutex;
-	u64		max_period;
-	struct platform_device	*plat_device;
 	struct pmu_hw_events	*(*get_hw_events)(void);
 	int	(*test_set_event_constraints)(struct perf_event *event);
 	int	(*clear_event_constraints)(struct perf_event *event);
diff --git a/arch/arm/include/asm/spinlock.h b/arch/arm/include/asm/spinlock.h
index 94aa75e..bc81696 100644
--- a/arch/arm/include/asm/spinlock.h
+++ b/arch/arm/include/asm/spinlock.h
@@ -37,9 +37,11 @@
 #endif
 
 /*
- * The fixup involves disabling interrupts during execution of the WFE
- * instruction. This could potentially lead to deadlock if a thread is trying
- * to acquire a spinlock which is being released from an interrupt context.
+ * The fixup involves disabling FIQs during execution of the WFE instruction.
+ * This could potentially lead to deadlock if a thread is trying to acquire a
+ * spinlock which is being released from an FIQ. This should not be a problem
+ * because FIQs are handled by the secure environment and do not directly
+ * manipulate spinlocks.
  */
 #ifdef CONFIG_MSM_KRAIT_WFE_FIXUP
 #define WFE_SAFE(fixup, tmp) 				\
@@ -47,7 +49,7 @@
 "	cmp	" fixup ", #0\n"			\
 "	wfeeq\n"					\
 "	beq	10f\n"					\
-"	cpsid	if\n"					\
+"	cpsid   f\n"					\
 "	mrc	p15, 7, " fixup ", c15, c0, 5\n"	\
 "	bic	" fixup ", " fixup ", #0x10000\n"	\
 "	mcr	p15, 7, " fixup ", c15, c0, 5\n"	\
diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S
index 7c44acd..7a8c2d6 100644
--- a/arch/arm/kernel/entry-armv.S
+++ b/arch/arm/kernel/entry-armv.S
@@ -966,7 +966,7 @@
  * SP points to a minimal amount of processor-private memory, the address
  * of which is copied into r0 for the mode specific abort handler.
  */
-	.macro	vector_stub, name, mode, correction=0
+	.macro	vector_stub, name, mode, fixup, correction=0
 	.align	5
 
 vector_\name:
@@ -995,6 +995,18 @@
 	and	lr, lr, #0x0f
  THUMB(	adr	r0, 1f			)
  THUMB(	ldr	lr, [r0, lr, lsl #2]	)
+	.if	\fixup
+#ifdef CONFIG_MSM_KRAIT_WFE_FIXUP
+	ldr	r0, .krait_fixup
+	ldr	r0, [r0]
+	cmp	r0, #0
+	beq	10f
+	mrc	p15, 7, r0, c15, c0, 5
+	orr	r0, r0, #0x10000
+	mcr	p15, 7, r0, c15, c0, 5
+10:	isb
+#endif
+	.endif
 	mov	r0, sp
  ARM(	ldr	lr, [pc, lr, lsl #2]	)
 	movs	pc, lr			@ branch to handler in SVC mode
@@ -1010,7 +1022,7 @@
 /*
  * Interrupt dispatcher
  */
-	vector_stub	irq, IRQ_MODE, 4
+	vector_stub	irq, IRQ_MODE, 1, 4
 
 	.long	__irq_usr			@  0  (USR_26 / USR_32)
 	.long	__irq_invalid			@  1  (FIQ_26 / FIQ_32)
@@ -1033,7 +1045,7 @@
  * Data abort dispatcher
  * Enter in ABT mode, spsr = USR CPSR, lr = USR PC
  */
-	vector_stub	dabt, ABT_MODE, 8
+	vector_stub	dabt, ABT_MODE, 0, 8
 
 	.long	__dabt_usr			@  0  (USR_26 / USR_32)
 	.long	__dabt_invalid			@  1  (FIQ_26 / FIQ_32)
@@ -1056,7 +1068,7 @@
  * Prefetch abort dispatcher
  * Enter in ABT mode, spsr = USR CPSR, lr = USR PC
  */
-	vector_stub	pabt, ABT_MODE, 4
+	vector_stub	pabt, ABT_MODE, 0, 4
 
 	.long	__pabt_usr			@  0 (USR_26 / USR_32)
 	.long	__pabt_invalid			@  1 (FIQ_26 / FIQ_32)
@@ -1079,7 +1091,7 @@
  * Undef instr entry dispatcher
  * Enter in UND mode, spsr = SVC/USR CPSR, lr = SVC/USR PC
  */
-	vector_stub	und, UND_MODE
+	vector_stub	und, UND_MODE, 0
 
 	.long	__und_usr			@  0 (USR_26 / USR_32)
 	.long	__und_invalid			@  1 (FIQ_26 / FIQ_32)
@@ -1131,6 +1143,8 @@
 
 .LCvswi:
 	.word	vector_swi
+.krait_fixup:
+	.word	msm_krait_need_wfe_fixup
 
 	.globl	__stubs_end
 __stubs_end:
diff --git a/arch/arm/kernel/perf_event.c b/arch/arm/kernel/perf_event.c
index 778128b..e97aef2 100644
--- a/arch/arm/kernel/perf_event.c
+++ b/arch/arm/kernel/perf_event.c
@@ -600,6 +600,21 @@
 	struct arm_pmu *armpmu = to_arm_pmu(pmu);
 	struct pmu_hw_events *hw_events = armpmu->get_hw_events();
 	int enabled = bitmap_weight(hw_events->used_mask, armpmu->num_events);
+	int idx;
+
+	if (armpmu->from_idle) {
+		for (idx = 0; idx <= cpu_pmu->num_events; ++idx) {
+			struct perf_event *event = hw_events->events[idx];
+
+			if (!event)
+				continue;
+
+			armpmu->enable(&event->hw, idx, event->cpu);
+		}
+
+		/* Reset bit so we don't needlessly re-enable counters. */
+		armpmu->from_idle = 0;
+	}
 
 	if (enabled)
 		armpmu->start();
@@ -716,6 +731,7 @@
  * UNKNOWN at reset, the PMU must be explicitly reset to avoid reading
  * junk values out of them.
  */
+
 static int __cpuinit pmu_cpu_notify(struct notifier_block *b,
 					unsigned long action, void *hcpu)
 {
@@ -785,6 +801,11 @@
 	case CPU_PM_ENTER_FAILED:
 	case CPU_PM_EXIT:
 		if (cpu_has_active_perf() && cpu_pmu->reset) {
+			/*
+			 * Flip this bit so armpmu_enable knows it needs
+			 * to re-enable active counters.
+			 */
+			cpu_pmu->from_idle = 1;
 			cpu_pmu->reset(NULL);
 			perf_pmu_enable(&cpu_pmu->pmu);
 		}
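
The two perf_event.c hunks above cooperate: the CPU_PM notifier flags the PMU as coming back from idle (after reset() has wiped the counter hardware), and the next armpmu_enable() pass re-programs every active counter before restarting the PMU, then clears the flag. A minimal, self-contained sketch of that flag-and-replay pattern (plain C with made-up names, not the kernel's arm_pmu structures) is:

#include <stdio.h>
#include <string.h>

#define NUM_COUNTERS 4

struct fake_pmu {
	int from_idle;                /* set on CPU_PM_EXIT, consumed by enable() */
	int active[NUM_COUNTERS];     /* software view: which slots carry an event */
	int programmed[NUM_COUNTERS]; /* hardware view: lost across power collapse */
};

/* Model of the CPU_PM_EXIT path: hardware state is gone, so flag the PMU. */
static void cpu_pm_exit(struct fake_pmu *pmu)
{
	memset(pmu->programmed, 0, sizeof(pmu->programmed));
	pmu->from_idle = 1;
}

/* Model of armpmu_enable(): replay active counters once after idle. */
static void pmu_enable(struct fake_pmu *pmu)
{
	int idx;

	if (pmu->from_idle) {
		for (idx = 0; idx < NUM_COUNTERS; idx++)
			if (pmu->active[idx])
				pmu->programmed[idx] = 1;
		/* Reset the flag so we don't needlessly re-enable counters. */
		pmu->from_idle = 0;
	}
}

int main(void)
{
	struct fake_pmu pmu = { .active = { 1, 0, 1, 0 } };

	cpu_pm_exit(&pmu);  /* power collapse wiped the counters */
	pmu_enable(&pmu);   /* next enable pass restores slots 0 and 2 */
	printf("programmed: %d %d %d %d\n", pmu.programmed[0],
	       pmu.programmed[1], pmu.programmed[2], pmu.programmed[3]);
	return 0;
}
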
diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
index 28d6e60..ca3e996 100644
--- a/arch/arm/kernel/smp.c
+++ b/arch/arm/kernel/smp.c
@@ -651,7 +651,8 @@
 
 	cpumask_copy(&mask, cpu_online_mask);
 	cpumask_clear_cpu(smp_processor_id(), &mask);
-	smp_cross_call(&mask, IPI_CPU_STOP);
+	if (!cpumask_empty(&mask))
+		smp_cross_call(&mask, IPI_CPU_STOP);
 
 	/* Wait up to one second for other CPUs to stop */
 	timeout = USEC_PER_SEC;
diff --git a/arch/arm/mach-msm/Kconfig b/arch/arm/mach-msm/Kconfig
index 0aec27a..1665abd 100644
--- a/arch/arm/mach-msm/Kconfig
+++ b/arch/arm/mach-msm/Kconfig
@@ -2095,6 +2095,14 @@
 		deadlocks. It does not run during the bootup process, so it will
 		not catch any early lockups.
 
+config MSM_MEMORY_DUMP
+	bool "MSM Memory Dump Support"
+	help
+		This enables the memory dump feature. It allows client
+		subsystems to register their respective dump regions. When
+		a deadlock or CPU hang occurs, these regions are captured
+		to give a snapshot of the system at the time of the crash.
+
 config MSM_DLOAD_MODE
 	bool "Enable download mode on crashes"
 	depends on ARCH_MSM8X60 || ARCH_MSM8960 || ARCH_MSM9615
@@ -2290,6 +2298,33 @@
 	  Enable support for On-Chip Memory available on certain MSM chipsets.
 	  OCMEM is a low latency, high performance pool shared by subsystems.
 
+config MSM_OCMEM_LOCAL_POWER_CTRL
+	bool "OCMEM Local Power Control"
+	depends on MSM_OCMEM
+	help
+	  Enable direct power management of the OCMEM core by the
+	  OCMEM driver. By default power management is delegated to
+	  the RPM. Selecting this option causes the OCMEM driver to
+	  directly handle the various macro power transitions.
+
+config MSM_OCMEM_DEBUG
+	bool "OCMEM Debug Support"
+	depends on MSM_OCMEM
+	help
+	  Enable debug options for the On-Chip Memory (OCMEM) driver.
+	  Debug options are available for memory, power and latency.
+	  Choosing one of these options allows each individual
+	  subsystem to be debugged separately.
+
+config MSM_OCMEM_POWER_DEBUG
+	bool "OCMEM Power Debug Support"
+	depends on MSM_OCMEM_DEBUG
+	help
+	  Enable debug support for OCMEM power management.
+	  This adds support for verifying all power-management
+	  operations of OCMEM. Both local and RPM-assisted power
+	  management operations are supported.
+
 config MSM_RTB
 	bool "Register tracing"
 	help
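
For context on the MSM_MEMORY_DUMP help text above: the model is that each client subsystem describes the memory it wants captured, and the dump driver keeps those descriptors in a table the crash path can walk. The sketch below only illustrates that registration model with hypothetical names (dump_region, register_dump_region); it is not the msm_memory_dump.c API.

#include <stdint.h>
#include <stdio.h>

#define MAX_DUMP_REGIONS 8

/* Hypothetical descriptor a client hands to the dump driver. */
struct dump_region {
	const char *name;
	uint64_t start;
	uint64_t size;
};

static struct dump_region dump_table[MAX_DUMP_REGIONS];
static int nr_regions;

/* Hypothetical registration call; a real driver would also publish the
 * table's physical address so the watchdog/debug-image path can find it. */
static int register_dump_region(const char *name, uint64_t start, uint64_t size)
{
	if (nr_regions >= MAX_DUMP_REGIONS)
		return -1;
	dump_table[nr_regions].name = name;
	dump_table[nr_regions].start = start;
	dump_table[nr_regions].size = size;
	return nr_regions++;
}

int main(void)
{
	register_dump_region("cpu_ctx", 0xfe800000ULL, 0x10000ULL);
	register_dump_region("l2_cache", 0xfe900000ULL, 0x80000ULL);
	printf("%d region(s) registered, first: %s\n", nr_regions,
	       dump_table[0].name);
	return 0;
}
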
diff --git a/arch/arm/mach-msm/Makefile b/arch/arm/mach-msm/Makefile
index c839a4a..b96ccec 100644
--- a/arch/arm/mach-msm/Makefile
+++ b/arch/arm/mach-msm/Makefile
@@ -229,6 +229,7 @@
 obj-$(CONFIG_MSM_WATCHDOG) += msm_watchdog.o
 obj-$(CONFIG_MSM_WATCHDOG) += msm_watchdog_asm.o
 obj-$(CONFIG_MSM_WATCHDOG_V2) += msm_watchdog_v2.o
+obj-$(CONFIG_MSM_MEMORY_DUMP) += msm_memory_dump.o
 obj-$(CONFIG_MACH_MSM8X60_SURF) += board-msm8x60.o
 obj-$(CONFIG_MACH_MSM8X60_FFA) += board-msm8x60.o
 obj-$(CONFIG_MACH_MSM8X60_FLUID) += board-msm8x60.o
@@ -250,7 +251,7 @@
 obj-$(CONFIG_MACH_MSM7627A_QRD1) += board-qrd7627a.o board-7627a-all.o
 obj-$(CONFIG_MACH_MSM7627A_QRD3) += board-qrd7627a.o board-7627a-all.o
 obj-$(CONFIG_MACH_MSM7627A_EVB) += board-qrd7627a.o board-7627a-all.o
-obj-$(CONFIG_ARCH_MSM8625) += devices-msm7x27a.o clock-pcom-lookup.o mpm-8625.o
+obj-$(CONFIG_ARCH_MSM8625) += msm_smem_iface.o devices-msm7x27a.o clock-pcom-lookup.o mpm-8625.o
 obj-$(CONFIG_MACH_MSM8625_RUMI3) += board-msm7x27a.o
 obj-$(CONFIG_MACH_MSM8625_SURF) +=  board-msm7x27a.o board-7627a-all.o
 obj-$(CONFIG_MACH_MSM8625_EVB) +=  board-qrd7627a.o board-7627a-all.o
@@ -335,7 +336,7 @@
 obj-$(CONFIG_ARCH_MSM8X60) += board-msm8x60-vcm.o
 endif
 obj-$(CONFIG_MSM_OCMEM) += ocmem.o ocmem_allocator.o ocmem_notifier.o
-obj-$(CONFIG_MSM_OCMEM) += ocmem_sched.o ocmem_api.o ocmem_rdm.o
+obj-$(CONFIG_MSM_OCMEM) += ocmem_sched.o ocmem_api.o ocmem_rdm.o ocmem_core.o
 
 obj-$(CONFIG_ARCH_MSM7X27) += gpiomux-7x27.o gpiomux-v1.o gpiomux.o
 obj-$(CONFIG_ARCH_MSM7X30) += gpiomux-7x30.o gpiomux-v1.o gpiomux.o
diff --git a/arch/arm/mach-msm/acpuclock-7627.c b/arch/arm/mach-msm/acpuclock-7627.c
index 639cc94..09a1be7 100644
--- a/arch/arm/mach-msm/acpuclock-7627.c
+++ b/arch/arm/mach-msm/acpuclock-7627.c
@@ -249,14 +249,16 @@
 /* 8625 PLL4 @ 1209MHz with GSM capable modem */
 static struct clkctl_acpu_speed pll0_960_pll1_245_pll2_1200_pll4_1209[] = {
 	{ 0, 19200, ACPU_PLL_TCXO, 0, 0, 2400, 3, 0, 30720 },
-	{ 0, 61440, ACPU_PLL_1, 1, 3,  7680, 3, 1, 61440 },
-	{ 1, 122880, ACPU_PLL_1, 1, 1,  15360, 3, 2, 61440 },
-	{ 1, 245760, ACPU_PLL_1, 1, 0, 30720, 3, 3, 61440 },
-	{ 1, 320000, ACPU_PLL_0, 4, 2, 40000, 3, 4, 122880 },
-	{ 1, 480000, ACPU_PLL_0, 4, 1, 60000, 3, 5, 122880 },
-	{ 1, 600000, ACPU_PLL_2, 2, 1, 75000, 3, 6, 160000 },
-	{ 0, 604800, ACPU_PLL_4, 6, 1, 75600, 3, 6, 160000 },
-	{ 1, 1209600, ACPU_PLL_4, 6, 0, 151200, 3, 7, 200000},
+	{ 0, 61440, ACPU_PLL_1, 1, 3,  7680, 3, 0, 61440 },
+	{ 0, 122880, ACPU_PLL_1, 1, 1,  15360, 3, 1, 61440 },
+	{ 1, 245760, ACPU_PLL_1, 1, 0, 30720, 3, 1, 61440 },
+	{ 0, 300000, ACPU_PLL_2, 2, 3, 37500, 3, 2, 122880 },
+	{ 1, 320000, ACPU_PLL_0, 4, 2, 40000, 3, 2, 122880 },
+	{ 1, 480000, ACPU_PLL_0, 4, 1, 60000, 3, 3, 122880 },
+	{ 0, 600000, ACPU_PLL_2, 2, 1, 75000, 3, 4, 160000 },
+	{ 1, 700800, ACPU_PLL_4, 6, 0, 87500, 3, 4, 160000, &pll4_cfg_tbl[0]},
+	{ 1, 1008000, ACPU_PLL_4, 6, 0, 126000, 3, 5, 200000, &pll4_cfg_tbl[1]},
+	{ 1, 1209600, ACPU_PLL_4, 6, 0, 151200, 3, 6, 200000, &pll4_cfg_tbl[2]},
 	{ 0 }
 };
 
@@ -264,13 +266,14 @@
 static struct clkctl_acpu_speed pll0_960_pll1_196_pll2_1200_pll4_1209[] = {
 	{ 0, 19200, ACPU_PLL_TCXO, 0, 0, 2400, 3, 0, 24576 },
 	{ 0, 65536, ACPU_PLL_1, 1, 3,  8192, 3, 1, 49152 },
-	{ 1, 98304, ACPU_PLL_1, 1, 1,  12288, 3, 2, 49152 },
+	{ 0, 98304, ACPU_PLL_1, 1, 1,  12288, 3, 2, 49152 },
 	{ 1, 196608, ACPU_PLL_1, 1, 0, 24576, 3, 3, 98304 },
-	{ 1, 320000, ACPU_PLL_0, 4, 2, 40000, 3, 4, 122880 },
-	{ 1, 480000, ACPU_PLL_0, 4, 1, 60000, 3, 5, 122880 },
-	{ 1, 600000, ACPU_PLL_2, 2, 1, 75000, 3, 6, 160000 },
-	{ 0, 604800, ACPU_PLL_4, 6, 1, 75600, 3, 6, 160000 },
-	{ 1, 1209600, ACPU_PLL_4, 6, 0, 151200, 3, 7, 200000},
+	{ 1, 320000, ACPU_PLL_0, 4, 2, 40000, 3, 2, 122880 },
+	{ 1, 480000, ACPU_PLL_0, 4, 1, 60000, 3, 3, 122880 },
+	{ 0, 600000, ACPU_PLL_2, 2, 1, 75000, 3, 4, 160000 },
+	{ 1, 700800, ACPU_PLL_4, 6, 0, 87500, 3, 4, 160000, &pll4_cfg_tbl[0]},
+	{ 1, 1008000, ACPU_PLL_4, 6, 0, 126000, 3, 5, 200000, &pll4_cfg_tbl[1]},
+	{ 1, 1209600, ACPU_PLL_4, 6, 0, 151200, 3, 6, 200000, &pll4_cfg_tbl[2]},
 	{ 0 }
 };
 
@@ -725,7 +728,7 @@
 		if ((delta > drv_state.max_speed_delta_khz)
 				|| (strt_s->pll == ACPU_PLL_4 &&
 					tgt_s->pll == ACPU_PLL_4))
-			clk_disable_unprepare(pll_clk[backup_s->pll].clk);
+			clk_disable(pll_clk[backup_s->pll].clk);
 
 		goto done;
 	}
@@ -959,17 +962,31 @@
 		}
 	}
 
+	if (acpu_freq_tbl == NULL) {
+		pr_crit("Unknown PLL configuration!\n");
+		BUG();
+	}
+
 	/*
-	 * When PLL4 can run max @ 1401.6MHz, we have to support
-	 * dynamic reprograming of PLL4.
-	 *
+	 * Turn ON the dynamic reprogramming method
+	 * if one of the table entry has pll_rate defined.
+	 */
+	for ( ; t->tbl->a11clk_khz; t->tbl++) {
+		if (t->tbl->pll_rate) {
+			if (!dynamic_reprogram) {
+				dynamic_reprogram = 1;
+				pr_info("Dynamic reprogramming is ON\n");
+			}
+		}
+	}
+
+	/*
 	 * Also find the backup pll used during PLL4 reprogramming.
 	 * We are using PLL2@600MHz as backup PLL, since 800MHz jump
 	 * is fine.
 	 */
-	if (t->pll4_rate == 1401) {
-		dynamic_reprogram = 1;
-		for ( ; t->tbl->a11clk_khz; t->tbl++) {
+	if (dynamic_reprogram) {
+		for (t->tbl = acpu_freq_tbl; t->tbl->a11clk_khz; t->tbl++) {
 			if (t->tbl->pll == ACPU_PLL_2 &&
 					t->tbl->a11clk_src_div == 1) {
 				backup_s = t->tbl;
@@ -977,11 +994,6 @@
 			}
 		}
 	}
-
-	if (acpu_freq_tbl == NULL) {
-		pr_crit("Unknown PLL configuration!\n");
-		BUG();
-	}
 }
 
 /*
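
The acpuclock-7627.c change above replaces the pll4_rate == 1401 special case with a table scan: dynamic reprogramming is turned on if any frequency entry carries a pll_rate, and a second scan then picks the PLL2, divider-1 entry as the backup source. A trimmed-down sketch of those two scans (stand-in struct, not the real clkctl_acpu_speed) looks like:

#include <stdio.h>
#include <stddef.h>

/* Trimmed-down stand-in for clkctl_acpu_speed: only the fields the scan needs. */
struct speed {
	unsigned a11clk_khz;   /* 0 terminates the table */
	int pll;               /* source PLL id */
	int a11clk_src_div;
	const void *pll_rate;  /* non-NULL => entry needs PLL4 reprogramming */
};

#define ACPU_PLL_2 2

static int table_needs_dynamic_reprogram(const struct speed *tbl)
{
	for (; tbl->a11clk_khz; tbl++)
		if (tbl->pll_rate)
			return 1;
	return 0;
}

static const struct speed *find_backup_entry(const struct speed *tbl)
{
	for (; tbl->a11clk_khz; tbl++)
		if (tbl->pll == ACPU_PLL_2 && tbl->a11clk_src_div == 1)
			return tbl;       /* e.g. PLL2 @ 600 MHz */
	return NULL;
}

int main(void)
{
	static const int cfg = 1; /* placeholder for a pll4_cfg_tbl entry */
	static const struct speed tbl[] = {
		{  480000, 0, 1, NULL },
		{  600000, ACPU_PLL_2, 1, NULL },
		{ 1209600, 4, 0, &cfg },
		{ 0 }
	};

	if (table_needs_dynamic_reprogram(tbl)) {
		const struct speed *backup = find_backup_entry(tbl);
		printf("dynamic reprogramming ON, backup %u kHz\n",
		       backup ? backup->a11clk_khz : 0);
	}
	return 0;
}
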
diff --git a/arch/arm/mach-msm/acpuclock-8974.c b/arch/arm/mach-msm/acpuclock-8974.c
index 8c89014..22275b4 100644
--- a/arch/arm/mach-msm/acpuclock-8974.c
+++ b/arch/arm/mach-msm/acpuclock-8974.c
@@ -23,7 +23,7 @@
 #include "acpuclock-krait.h"
 
 /* Corner type vreg VDD values */
-#define LVL_NONE	RPM_REGULATOR_CORNER_RETENTION
+#define LVL_NONE	RPM_REGULATOR_CORNER_NONE
 #define LVL_LOW		RPM_REGULATOR_CORNER_SVS_SOC
 #define LVL_NOM		RPM_REGULATOR_CORNER_NORMAL
 #define LVL_HIGH	RPM_REGULATOR_CORNER_SUPER_TURBO
@@ -94,11 +94,10 @@
 };
 
 static struct msm_bus_paths bw_level_tbl[] __initdata = {
-	[0] =  BW_MBPS(400), /* At least  50 MHz on bus. */
-	[1] =  BW_MBPS(800), /* At least 100 MHz on bus. */
-	[2] = BW_MBPS(1334), /* At least 167 MHz on bus. */
-	[3] = BW_MBPS(2666), /* At least 200 MHz on bus. */
-	[4] = BW_MBPS(3200), /* At least 333 MHz on bus. */
+	[0] =  BW_MBPS(552), /* At least  69 MHz on bus. */
+	[1] = BW_MBPS(1112), /* At least 139 MHz on bus. */
+	[2] = BW_MBPS(2224), /* At least 278 MHz on bus. */
+	[3] = BW_MBPS(4448), /* At least 556 MHz on bus. */
 };
 
 static struct msm_bus_scale_pdata bus_scale_data __initdata = {
@@ -109,31 +108,59 @@
 };
 
 static struct l2_level l2_freq_tbl[] __initdata = {
-	[0]  = { {  300000, PLL_0, 0, 2,   0 }, LVL_LOW, 1050000, 2 },
-	[1]  = { {  384000, HFPLL, 2, 0,  40 }, LVL_NOM, 1050000, 2 },
-	[2]  = { {  460800, HFPLL, 2, 0,  48 }, LVL_NOM, 1050000, 2 },
-	[3]  = { {  537600, HFPLL, 1, 0,  28 }, LVL_NOM, 1050000, 2 },
-	[4]  = { {  576000, HFPLL, 1, 0,  30 }, LVL_NOM, 1050000, 3 },
-	[5]  = { {  652800, HFPLL, 1, 0,  34 }, LVL_NOM, 1050000, 3 },
-	[6]  = { {  729600, HFPLL, 1, 0,  38 }, LVL_NOM, 1050000, 3 },
-	[7]  = { {  806400, HFPLL, 1, 0,  42 }, LVL_NOM, 1050000, 3 },
-	[8]  = { {  883200, HFPLL, 1, 0,  46 }, LVL_NOM, 1050000, 4 },
-	[9]  = { {  960000, HFPLL, 1, 0,  50 }, LVL_NOM, 1050000, 4 },
-	[10] = { { 1036800, HFPLL, 1, 0,  54 }, LVL_NOM, 1050000, 4 },
+	[0]  = { {  300000, PLL_0, 0, 2,   0 }, LVL_LOW,   950000, 0 },
+	[1]  = { {  384000, HFPLL, 2, 0,  40 }, LVL_NOM,   950000, 1 },
+	[2]  = { {  460800, HFPLL, 2, 0,  48 }, LVL_NOM,   950000, 1 },
+	[3]  = { {  537600, HFPLL, 1, 0,  28 }, LVL_NOM,   950000, 2 },
+	[4]  = { {  576000, HFPLL, 1, 0,  30 }, LVL_NOM,   950000, 2 },
+	[5]  = { {  652800, HFPLL, 1, 0,  34 }, LVL_NOM,   950000, 2 },
+	[6]  = { {  729600, HFPLL, 1, 0,  38 }, LVL_NOM,   950000, 2 },
+	[7]  = { {  806400, HFPLL, 1, 0,  42 }, LVL_NOM,   950000, 2 },
+	[8]  = { {  883200, HFPLL, 1, 0,  46 }, LVL_HIGH, 1050000, 2 },
+	[9]  = { {  960000, HFPLL, 1, 0,  50 }, LVL_HIGH, 1050000, 2 },
+	[10] = { { 1036800, HFPLL, 1, 0,  54 }, LVL_HIGH, 1050000, 3 },
+	[11] = { { 1113600, HFPLL, 1, 0,  58 }, LVL_HIGH, 1050000, 3 },
+	[12] = { { 1190400, HFPLL, 1, 0,  62 }, LVL_HIGH, 1050000, 3 },
+	[13] = { { 1267200, HFPLL, 1, 0,  66 }, LVL_HIGH, 1050000, 3 },
+	[14] = { { 1344000, HFPLL, 1, 0,  70 }, LVL_HIGH, 1050000, 3 },
+	[15] = { { 1420800, HFPLL, 1, 0,  74 }, LVL_HIGH, 1050000, 3 },
+	[16] = { { 1497600, HFPLL, 1, 0,  78 }, LVL_HIGH, 1050000, 3 },
+	[17] = { { 1574400, HFPLL, 1, 0,  82 }, LVL_HIGH, 1050000, 3 },
+	[18] = { { 1651200, HFPLL, 1, 0,  86 }, LVL_HIGH, 1050000, 3 },
+	[19] = { { 1728000, HFPLL, 1, 0,  90 }, LVL_HIGH, 1050000, 3 },
+	[20] = { { 1804800, HFPLL, 1, 0,  94 }, LVL_HIGH, 1050000, 3 },
+	[21] = { { 1881600, HFPLL, 1, 0,  98 }, LVL_HIGH, 1050000, 3 },
+	[22] = { { 1958400, HFPLL, 1, 0, 102 }, LVL_HIGH, 1050000, 3 },
+	[23] = { { 2035200, HFPLL, 1, 0, 106 }, LVL_HIGH, 1050000, 3 },
+	[24] = { { 2112000, HFPLL, 1, 0, 110 }, LVL_HIGH, 1050000, 3 },
+	[25] = { { 2188800, HFPLL, 1, 0, 114 }, LVL_HIGH, 1050000, 3 },
 };
 
 static struct acpu_level acpu_freq_tbl[] __initdata = {
-	{ 1, {  300000, PLL_0, 0, 2,   0 }, L2(0),  1050000, 3200000 },
-	{ 1, {  384000, HFPLL, 2, 0,  40 }, L2(1),  1050000, 3200000 },
-	{ 1, {  460800, HFPLL, 2, 0,  48 }, L2(2),  1050000, 3200000 },
-	{ 1, {  537600, HFPLL, 1, 0,  28 }, L2(3),  1050000, 3200000 },
-	{ 1, {  576000, HFPLL, 1, 0,  30 }, L2(4),  1050000, 3200000 },
-	{ 1, {  652800, HFPLL, 1, 0,  34 }, L2(5),  1050000, 3200000 },
-	{ 1, {  729600, HFPLL, 1, 0,  38 }, L2(6),  1050000, 3200000 },
-	{ 1, {  806400, HFPLL, 1, 0,  42 }, L2(7),  1050000, 3200000 },
-	{ 1, {  883200, HFPLL, 1, 0,  46 }, L2(8),  1050000, 3200000 },
-	{ 1, {  960000, HFPLL, 1, 0,  50 }, L2(9),  1050000, 3200000 },
-	{ 1, { 1036800, HFPLL, 1, 0,  54 }, L2(10), 1050000, 3200000 },
+	{ 1, {  300000, PLL_0, 0, 2,   0 }, L2(0),   950000, 3200000 },
+	{ 1, {  384000, HFPLL, 2, 0,  40 }, L2(3),   950000, 3200000 },
+	{ 1, {  460800, HFPLL, 2, 0,  48 }, L2(3),   950000, 3200000 },
+	{ 1, {  537600, HFPLL, 1, 0,  28 }, L2(5),   950000, 3200000 },
+	{ 1, {  576000, HFPLL, 1, 0,  30 }, L2(5),   950000, 3200000 },
+	{ 1, {  652800, HFPLL, 1, 0,  34 }, L2(5),   950000, 3200000 },
+	{ 1, {  729600, HFPLL, 1, 0,  38 }, L2(5),   950000, 3200000 },
+	{ 1, {  806400, HFPLL, 1, 0,  42 }, L2(7),   950000, 3200000 },
+	{ 1, {  883200, HFPLL, 1, 0,  46 }, L2(7),   950000, 3200000 },
+	{ 1, {  960000, HFPLL, 1, 0,  50 }, L2(7),   950000, 3200000 },
+	{ 1, { 1036800, HFPLL, 1, 0,  54 }, L2(7),   950000, 3200000 },
+	{ 0, { 1113600, HFPLL, 1, 0,  58 }, L2(12), 1050000, 3200000 },
+	{ 0, { 1190400, HFPLL, 1, 0,  62 }, L2(12), 1050000, 3200000 },
+	{ 0, { 1267200, HFPLL, 1, 0,  66 }, L2(12), 1050000, 3200000 },
+	{ 0, { 1344000, HFPLL, 1, 0,  70 }, L2(15), 1050000, 3200000 },
+	{ 0, { 1420800, HFPLL, 1, 0,  74 }, L2(15), 1050000, 3200000 },
+	{ 0, { 1497600, HFPLL, 1, 0,  78 }, L2(15), 1050000, 3200000 },
+	{ 0, { 1574400, HFPLL, 1, 0,  82 }, L2(20), 1050000, 3200000 },
+	{ 0, { 1651200, HFPLL, 1, 0,  86 }, L2(20), 1050000, 3200000 },
+	{ 0, { 1728000, HFPLL, 1, 0,  90 }, L2(20), 1050000, 3200000 },
+	{ 0, { 1804800, HFPLL, 1, 0,  94 }, L2(25), 1050000, 3200000 },
+	{ 0, { 1881600, HFPLL, 1, 0,  98 }, L2(25), 1050000, 3200000 },
+	{ 0, { 1958400, HFPLL, 1, 0, 102 }, L2(25), 1050000, 3200000 },
+	{ 0, { 1996800, HFPLL, 1, 0, 104 }, L2(25), 1050000, 3200000 },
 	{ 0, { 0 } }
 };
 
diff --git a/arch/arm/mach-msm/board-8064-gpiomux.c b/arch/arm/mach-msm/board-8064-gpiomux.c
index 1c19442..f57771c 100644
--- a/arch/arm/mach-msm/board-8064-gpiomux.c
+++ b/arch/arm/mach-msm/board-8064-gpiomux.c
@@ -493,6 +493,22 @@
 		},
 	},
 };
+static struct msm_gpiomux_config cyts_gpio_alt_config[] __initdata = {
+	{	/* TS INTERRUPT */
+		.gpio = 6,
+		.settings = {
+			[GPIOMUX_ACTIVE]    = &cyts_int_act_cfg,
+			[GPIOMUX_SUSPENDED] = &cyts_int_sus_cfg,
+		},
+	},
+	{	/* TS SLEEP */
+		.gpio = 12,
+		.settings = {
+			[GPIOMUX_ACTIVE]    = &cyts_sleep_act_cfg,
+			[GPIOMUX_SUSPENDED] = &cyts_sleep_sus_cfg,
+		},
+	},
+};
 
 static struct gpiomux_setting hsic_act_cfg = {
 	.func = GPIOMUX_FUNC_1,
@@ -788,7 +804,7 @@
 static struct gpiomux_setting mdm2ap_status_cfg = {
 	.func = GPIOMUX_FUNC_GPIO,
 	.drv = GPIOMUX_DRV_8MA,
-	.pull = GPIOMUX_PULL_NONE,
+	.pull = GPIOMUX_PULL_DOWN,
 };
 
 static struct gpiomux_setting mdm2ap_errfatal_cfg = {
@@ -828,6 +844,7 @@
 	{
 		.gpio = 49,
 		.settings = {
+			[GPIOMUX_ACTIVE] = &mdm2ap_status_cfg,
 			[GPIOMUX_SUSPENDED] = &mdm2ap_status_cfg,
 		}
 	},
@@ -1201,6 +1218,7 @@
 void __init apq8064_init_gpiomux(void)
 {
 	int rc;
+	int platform_version = socinfo_get_platform_version();
 
 	rc = msm_gpiomux_init(NR_GPIO_IRQS);
 	if (rc) {
@@ -1259,11 +1277,17 @@
 		msm_gpiomux_install(mdm_configs,
 			ARRAY_SIZE(mdm_configs));
 
-#ifdef CONFIG_USB_EHCI_MSM_HSIC
-	if (machine_is_apq8064_mtp())
-		msm_gpiomux_install(cyts_gpio_configs,
-				ARRAY_SIZE(cyts_gpio_configs));
+	if (machine_is_apq8064_mtp()) {
+		if (SOCINFO_VERSION_MINOR(platform_version) == 1) {
+			msm_gpiomux_install(cyts_gpio_alt_config,
+					ARRAY_SIZE(cyts_gpio_alt_config));
+		} else {
+			msm_gpiomux_install(cyts_gpio_configs,
+					ARRAY_SIZE(cyts_gpio_configs));
+		}
+	}
 
+#ifdef CONFIG_USB_EHCI_MSM_HSIC
 	if (machine_is_apq8064_mtp())
 		msm_gpiomux_install(apq8064_hsic_configs,
 				ARRAY_SIZE(apq8064_hsic_configs));
diff --git a/arch/arm/mach-msm/board-8064-pmic.c b/arch/arm/mach-msm/board-8064-pmic.c
index 678eb9e..e77e7c0 100644
--- a/arch/arm/mach-msm/board-8064-pmic.c
+++ b/arch/arm/mach-msm/board-8064-pmic.c
@@ -127,6 +127,7 @@
 	/* TABLA CODEC RESET */
 	PM8921_GPIO_OUTPUT(34, 1, MED),
 	PM8921_GPIO_OUTPUT(13, 0, HIGH),               /* PCIE_CLK_PWR_EN */
+	PM8921_GPIO_INPUT(12, PM_GPIO_PULL_UP_30),     /* PCIE_WAKE_N */
 };
 
 static struct pm8xxx_gpio_init pm8921_mtp_kp_gpios[] __initdata = {
@@ -140,6 +141,12 @@
 	PM8921_GPIO_INPUT(17, PM_GPIO_PULL_UP_1P5),	/* SD_WP */
 };
 
+static struct pm8xxx_gpio_init pm8921_mpq_gpios[] __initdata = {
+	PM8921_GPIO_INIT(27, PM_GPIO_DIR_IN, PM_GPIO_OUT_BUF_CMOS, 0,
+			PM_GPIO_PULL_NO, PM_GPIO_VIN_VPH, PM_GPIO_STRENGTH_NO,
+			PM_GPIO_FUNC_NORMAL, 0, 0),
+};
+
 /* Initial PM8XXX MPP configurations */
 static struct pm8xxx_mpp_init pm8xxx_mpps[] __initdata = {
 	PM8921_MPP_INIT(3, D_OUTPUT, PM8921_MPP_DIG_LEVEL_VPH, DOUT_CTRL_LOW),
@@ -185,6 +192,18 @@
 			}
 		}
 
+	if (machine_is_mpq8064_cdp() || machine_is_mpq8064_hrd()
+					|| machine_is_mpq8064_dtv())
+		for (i = 0; i < ARRAY_SIZE(pm8921_mpq_gpios); i++) {
+			rc = pm8xxx_gpio_config(pm8921_mpq_gpios[i].gpio,
+						&pm8921_mpq_gpios[i].config);
+			if (rc) {
+				pr_err("%s: pm8xxx_gpio_config: rc=%d\n",
+					__func__, rc);
+				break;
+			}
+		}
+
 	for (i = 0; i < ARRAY_SIZE(pm8xxx_mpps); i++) {
 		rc = pm8xxx_mpp_config(pm8xxx_mpps[i].mpp,
 					&pm8xxx_mpps[i].config);
diff --git a/arch/arm/mach-msm/board-8064.c b/arch/arm/mach-msm/board-8064.c
index 6317685..f733ba6 100644
--- a/arch/arm/mach-msm/board-8064.c
+++ b/arch/arm/mach-msm/board-8064.c
@@ -128,7 +128,8 @@
 #define PCIE_AXI_BAR_PHYS   0x08000000
 #define PCIE_AXI_BAR_SIZE   SZ_128M
 
-/* PCIe power enable pmic gpio */
+/* PCIe pmic gpios */
+#define PCIE_WAKE_N_PMIC_GPIO 12
 #define PCIE_PWR_EN_PMIC_GPIO 13
 #define PCIE_RST_N_PMIC_MPP 1
 
@@ -887,6 +888,8 @@
 	-1
 };
 
+#define PMIC_GPIO_DP		27    /* PMIC GPIO for D+ change */
+#define PMIC_GPIO_DP_IRQ	PM8921_GPIO_IRQ(PM8921_IRQ_BASE, PMIC_GPIO_DP)
 static struct msm_otg_platform_data msm_otg_pdata = {
 	.mode			= USB_OTG,
 	.otg_control		= OTG_PMIC_CONTROL,
@@ -912,6 +915,9 @@
 		if (machine_is_apq8064_liquid())
 			msm_ehci_host_pdata3.dock_connect_irq =
 					PM8921_MPP_IRQ(PM8921_IRQ_BASE, 9);
+		else
+			msm_ehci_host_pdata3.pmic_gpio_dp_irq =
+							PMIC_GPIO_DP_IRQ;
 
 		apq8064_device_ehci_host3.dev.platform_data =
 				&msm_ehci_host_pdata3;
@@ -1373,6 +1379,7 @@
 };
 #define CYTTSP_TS_GPIO_IRQ		6
 #define CYTTSP_TS_GPIO_SLEEP		33
+#define CYTTSP_TS_GPIO_SLEEP_ALT	12
 
 static ssize_t tma340_vkeys_show(struct kobject *kobj,
 			struct kobj_attribute *attr, char *buf)
@@ -1726,6 +1733,12 @@
 	.mdm2ap_vddmin_gpio = 80,
 };
 
+static struct gpiomux_setting mdm2ap_status_gpio_run_cfg = {
+	.func = GPIOMUX_FUNC_GPIO,
+	.drv = GPIOMUX_DRV_8MA,
+	.pull = GPIOMUX_PULL_NONE,
+};
+
 static struct mdm_platform_data mdm_platform_data = {
 	.mdm_version = "3.0",
 	.ramdump_delay_ms = 2000,
@@ -1734,6 +1747,7 @@
 	.vddmin_resource = &mdm_vddmin_rscs,
 	.peripheral_platform_device = &apq8064_device_hsic_host,
 	.ramdump_timeout_ms = 120000,
+	.mdm2ap_status_gpio_run_cfg = &mdm2ap_status_gpio_run_cfg,
 };
 
 static struct tsens_platform_data apq_tsens_pdata  = {
@@ -2075,6 +2089,7 @@
 	.gpio = msm_pcie_gpio_info,
 	.axi_addr = PCIE_AXI_BAR_PHYS,
 	.axi_size = PCIE_AXI_BAR_SIZE,
+	.wake_n = PM8921_GPIO_IRQ(PM8921_IRQ_BASE, PCIE_WAKE_N_PMIC_GPIO),
 };
 
 static int __init mpq8064_pcie_enabled(void)
@@ -2250,7 +2265,6 @@
 	&msm_bus_8064_cpss_fpb,
 	&apq8064_msm_device_vidc,
 	&msm_pil_dsps,
-	&msm_8960_riva,
 	&msm_8960_q6_lpass,
 	&msm_pil_vidc,
 	&msm_gss,
@@ -2940,7 +2954,10 @@
 	platform_device_register(&apq8064_slim_ctrl);
 	slim_register_board_info(apq8064_slim_devices,
 		ARRAY_SIZE(apq8064_slim_devices));
-	apq8064_init_dsps();
+	if (!PLATFORM_IS_MPQ8064()) {
+		apq8064_init_dsps();
+		platform_device_register(&msm_8960_riva);
+	}
 	msm_spm_init(msm_spm_data, ARRAY_SIZE(msm_spm_data));
 	msm_spm_l2_init(msm_spm_l2_data);
 	BUG_ON(msm_pm_boot_init(&msm_pm_boot_pdata));
@@ -2956,6 +2973,9 @@
 {
 	if (meminfo_init(SYS_MEMORY, SZ_256M) < 0)
 		pr_err("meminfo_init() failed!\n");
+	if (machine_is_apq8064_mtp() &&
+		SOCINFO_VERSION_MINOR(socinfo_get_platform_version()) == 1)
+			cyttsp_pdata.sleep_gpio = CYTTSP_TS_GPIO_SLEEP_ALT;
 	apq8064_common_init();
 	if (machine_is_mpq8064_cdp() || machine_is_mpq8064_hrd() ||
 		machine_is_mpq8064_dtv()) {
diff --git a/arch/arm/mach-msm/board-8960-gpiomux.c b/arch/arm/mach-msm/board-8960-gpiomux.c
index 5851990..1771bb9 100644
--- a/arch/arm/mach-msm/board-8960-gpiomux.c
+++ b/arch/arm/mach-msm/board-8960-gpiomux.c
@@ -55,6 +55,19 @@
 	.pull = GPIOMUX_PULL_NONE,
 };
 
+static struct gpiomux_setting gsbi6_active_cfg = {
+	.func = GPIOMUX_FUNC_GPIO,
+	.drv = GPIOMUX_DRV_2MA,
+	.pull = GPIOMUX_PULL_NONE,
+};
+
+static struct gpiomux_setting gsbi6_suspended_cfg = {
+	.func = GPIOMUX_FUNC_GPIO,
+	.drv = GPIOMUX_DRV_2MA,
+	.pull = GPIOMUX_PULL_DOWN,
+};
+
+
 static struct gpiomux_setting external_vfr[] = {
 	/* Suspended state */
 	{
@@ -436,6 +449,27 @@
 		},
 	},
 	{
+		.gpio      = 27,        /* GSBI6 BT_INT2AP_N for AR3002 */
+		.settings = {
+			[GPIOMUX_SUSPENDED] = &gsbi6_suspended_cfg,
+			[GPIOMUX_ACTIVE]    = &gsbi6_active_cfg,
+		},
+	},
+	{
+		.gpio      = 28,        /* GSBI6 BT_EN for AR3002 */
+		.settings = {
+			[GPIOMUX_SUSPENDED] = &gsbi6_suspended_cfg,
+			[GPIOMUX_ACTIVE]    = &gsbi6_active_cfg,
+		},
+	},
+	{
+		.gpio      = 29,        /* GSBI6 BT_WAKE for AR3002 */
+		.settings = {
+			[GPIOMUX_SUSPENDED] = &gsbi6_suspended_cfg,
+			[GPIOMUX_ACTIVE]    = &gsbi6_active_cfg,
+		},
+	},
+	{
 		.gpio      = 44,	/* GSBI12 I2C QUP SDA */
 		.settings = {
 			[GPIOMUX_SUSPENDED] = &gsbi12,
@@ -759,6 +793,16 @@
 	},
 };
 
+static struct msm_gpiomux_config hap_lvl_shft_config_sglte[] __initdata = {
+	{
+		.gpio = 89,
+		.settings = {
+			[GPIOMUX_SUSPENDED] = &hap_lvl_shft_suspended_config,
+			[GPIOMUX_ACTIVE] = &hap_lvl_shft_active_config,
+		},
+	},
+};
+
 static struct msm_gpiomux_config sglte_configs[] __initdata = {
 	/* AP2MDM_STATUS */
 	{
@@ -979,8 +1023,9 @@
 	}
 
 #if defined(CONFIG_KS8851) || defined(CONFIG_KS8851_MODULE)
-	msm_gpiomux_install(msm8960_ethernet_configs,
-			ARRAY_SIZE(msm8960_ethernet_configs));
+	if (socinfo_get_platform_subtype() != PLATFORM_SUBTYPE_SGLTE)
+		msm_gpiomux_install(msm8960_ethernet_configs,
+				ARRAY_SIZE(msm8960_ethernet_configs));
 #endif
 
 	msm_gpiomux_install(msm8960_gsbi_configs,
@@ -1007,9 +1052,15 @@
 #endif
 
 	if (machine_is_msm8960_mtp() || machine_is_msm8960_fluid() ||
-		machine_is_msm8960_liquid() || machine_is_msm8960_cdp())
-		msm_gpiomux_install(hap_lvl_shft_config,
-			ARRAY_SIZE(hap_lvl_shft_config));
+		machine_is_msm8960_liquid() || machine_is_msm8960_cdp()) {
+		if (socinfo_get_platform_subtype() == PLATFORM_SUBTYPE_SGLTE)
+			msm_gpiomux_install(hap_lvl_shft_config_sglte,
+				ARRAY_SIZE(hap_lvl_shft_config_sglte));
+
+		else
+			msm_gpiomux_install(hap_lvl_shft_config,
+				ARRAY_SIZE(hap_lvl_shft_config));
+	}
 
 #ifdef CONFIG_USB_EHCI_MSM_HSIC
 	if ((SOCINFO_VERSION_MAJOR(socinfo_get_version()) != 1) &&
diff --git a/arch/arm/mach-msm/board-8960.c b/arch/arm/mach-msm/board-8960.c
index 63eef4a..50a5ed2 100644
--- a/arch/arm/mach-msm/board-8960.c
+++ b/arch/arm/mach-msm/board-8960.c
@@ -109,7 +109,6 @@
 
 #define KS8851_RST_GPIO		89
 #define KS8851_IRQ_GPIO		90
-#define HAP_SHIFT_LVL_OE_GPIO	47
 
 #define MHL_GPIO_INT            4
 #define MHL_GPIO_RESET          15
@@ -1706,6 +1705,8 @@
 	},
 };
 
+#define HAP_SHIFT_LVL_OE_GPIO		47
+#define HAP_SHIFT_LVL_OE_GPIO_SGLTE	89
 #define PM_HAP_EN_GPIO		PM8921_GPIO_PM_TO_SYS(33)
 #define PM_HAP_LEN_GPIO		PM8921_GPIO_PM_TO_SYS(20)
 
@@ -1714,8 +1715,13 @@
 static int isa1200_power(int on)
 {
 	int rc = 0;
+	int hap_oe_gpio = HAP_SHIFT_LVL_OE_GPIO;
 
-	gpio_set_value(HAP_SHIFT_LVL_OE_GPIO, !!on);
+	if (socinfo_get_platform_subtype() == PLATFORM_SUBTYPE_SGLTE)
+		hap_oe_gpio = HAP_SHIFT_LVL_OE_GPIO_SGLTE;
+
+
+	gpio_set_value(hap_oe_gpio, !!on);
 
 	rc = on ? msm_xo_mode_vote(xo_handle_d1, MSM_XO_MODE_ON) :
 			msm_xo_mode_vote(xo_handle_d1, MSM_XO_MODE_OFF);
@@ -1728,13 +1734,14 @@
 	return 0;
 
 err_xo_vote:
-	gpio_set_value(HAP_SHIFT_LVL_OE_GPIO, !on);
+	gpio_set_value(hap_oe_gpio, !on);
 	return rc;
 }
 
 static int isa1200_dev_setup(bool enable)
 {
 	int rc = 0;
+	int hap_oe_gpio = HAP_SHIFT_LVL_OE_GPIO;
 
 	struct pm_gpio hap_gpio_config = {
 		.direction      = PM_GPIO_DIR_OUT,
@@ -1747,6 +1754,9 @@
 		.output_value   = 0,
 	};
 
+	if (socinfo_get_platform_subtype() == PLATFORM_SUBTYPE_SGLTE)
+		hap_oe_gpio = HAP_SHIFT_LVL_OE_GPIO_SGLTE;
+
 	if (enable == true) {
 		rc = pm8xxx_gpio_config(PM_HAP_EN_GPIO, &hap_gpio_config);
 		if (rc) {
@@ -1762,14 +1772,14 @@
 			return rc;
 		}
 
-		rc = gpio_request(HAP_SHIFT_LVL_OE_GPIO, "hap_shft_lvl_oe");
+		rc = gpio_request(hap_oe_gpio, "hap_shft_lvl_oe");
 		if (rc) {
 			pr_err("%s: unable to request gpio %d (%d)\n",
-					__func__, HAP_SHIFT_LVL_OE_GPIO, rc);
+					__func__, hap_oe_gpio, rc);
 			return rc;
 		}
 
-		rc = gpio_direction_output(HAP_SHIFT_LVL_OE_GPIO, 0);
+		rc = gpio_direction_output(hap_oe_gpio, 0);
 		if (rc) {
 			pr_err("%s: Unable to set direction\n", __func__);
 			goto free_gpio;
@@ -1783,7 +1793,7 @@
 			goto gpio_set_dir;
 		}
 	} else {
-		gpio_free(HAP_SHIFT_LVL_OE_GPIO);
+		gpio_free(hap_oe_gpio);
 
 		msm_xo_put(xo_handle_d1);
 	}
@@ -1791,9 +1801,9 @@
 	return 0;
 
 gpio_set_dir:
-	gpio_set_value(HAP_SHIFT_LVL_OE_GPIO, 0);
+	gpio_set_value(hap_oe_gpio, 0);
 free_gpio:
-	gpio_free(HAP_SHIFT_LVL_OE_GPIO);
+	gpio_free(hap_oe_gpio);
 	return rc;
 }
 
@@ -2333,7 +2343,7 @@
 	.rst_gpio = KS8851_RST_GPIO,
 };
 
-static struct spi_board_info spi_board_info[] __initdata = {
+static struct spi_board_info spi_eth_info[] __initdata = {
 	{
 		.modalias               = "ks8851",
 		.irq                    = MSM_GPIO_TO_INT(KS8851_IRQ_GPIO),
@@ -2343,6 +2353,8 @@
 		.mode                   = SPI_MODE_0,
 		.platform_data		= &spi_eth_pdata
 	},
+};
+static struct spi_board_info spi_board_info[] __initdata = {
 	{
 		.modalias               = "dsi_novatek_3d_panel_spi",
 		.max_speed_hz           = 10800000,
@@ -2496,6 +2508,88 @@
 static struct msm_serial_hs_platform_data msm_uart_dm9_pdata;
 #endif
 
+#if defined(CONFIG_BT) && defined(CONFIG_BT_HCIUART_ATH3K)
+static struct resource bluesleep_resources[] = {
+	{
+		.name   = "gpio_host_wake",
+		.start  = 27,
+		.end    = 27,
+		.flags  = IORESOURCE_IO,
+	},
+	{
+		.name   = "gpio_ext_wake",
+		.start  = 29,
+		.end    = 29,
+		.flags  = IORESOURCE_IO,
+	},
+	{
+		.name   = "host_wake",
+		.start  = MSM_GPIO_TO_INT(27),
+		.end    = MSM_GPIO_TO_INT(27),
+		.flags  = IORESOURCE_IRQ,
+	},
+};
+
+static struct platform_device msm_bluesleep_device = {
+	.name		= "bluesleep",
+	.id		= -1,
+	.num_resources	= ARRAY_SIZE(bluesleep_resources),
+	.resource	= bluesleep_resources,
+};
+
+static struct platform_device msm_bt_power_device = {
+	.name = "bt_power",
+};
+
+int gpio_bt_sys_rest_en = 28;
+
+static int bluetooth_power(int on)
+{
+	int rc;
+
+	if (on) {
+		rc = gpio_direction_output(gpio_bt_sys_rest_en, 1);
+		msleep(100);
+	} else {
+		gpio_set_value(gpio_bt_sys_rest_en, 0);
+		rc = gpio_direction_input(gpio_bt_sys_rest_en);
+		msleep(100);
+	}
+	pr_err("%s on= %d rc = %d\n", __func__, on, rc);
+	return 0;
+}
+
+static void __init bt_power_init(void)
+{
+	int rc;
+
+	msm_bt_power_device.dev.platform_data = &bluetooth_power;
+	pr_err("%s enter\n", __func__);
+
+	rc = gpio_request(gpio_bt_sys_rest_en, "bt sys_rst_n");
+	if (rc) {
+		pr_err("%s: unable to request gpio %d (%d)\n",
+			__func__, gpio_bt_sys_rest_en, rc);
+		return;
+	}
+
+	/* When booting up, de-assert BT reset pin */
+	rc = gpio_direction_output(gpio_bt_sys_rest_en, 0);
+	if (rc) {
+		pr_err("%s: Unable to set direction\n", __func__);
+		goto free_gpio;
+	}
+	pr_err("%s done\n", __func__);
+	return;
+
+free_gpio:
+	gpio_free(gpio_bt_sys_rest_en);
+	return;
+}
+#else
+#define bt_power_init(x) do {} while (0)
+#endif
+
 static struct platform_device *common_devices[] __initdata = {
 	&msm8960_device_acpuclk,
 	&msm8960_device_dmov,
@@ -2515,6 +2609,10 @@
 #endif
 	&msm_slim_ctrl,
 	&msm_device_wcnss_wlan,
+#if defined(CONFIG_BT) && defined(CONFIG_BT_HCIUART_ATH3K)
+	&msm_bluesleep_device,
+	&msm_bt_power_device,
+#endif
 #if defined(CONFIG_QSEECOM)
 	&qseecom_device,
 #endif
@@ -2909,7 +3007,7 @@
 		ARRAY_SIZE(sii_device_info),
 	},
 	{
-		I2C_LIQUID,
+		I2C_LIQUID | I2C_FFA,
 		MSM_8960_GSBI10_QUP_I2C_BUS_ID,
 		msm_isa1200_board_info,
 		ARRAY_SIZE(msm_isa1200_board_info),
@@ -3013,10 +3111,12 @@
 	msm8960_device_qup_spi_gsbi1.dev.platform_data =
 				&msm8960_qup_spi_gsbi1_pdata;
 	spi_register_board_info(spi_board_info, ARRAY_SIZE(spi_board_info));
+	if (socinfo_get_platform_subtype() != PLATFORM_SUBTYPE_SGLTE)
+		spi_register_board_info(spi_eth_info, ARRAY_SIZE(spi_eth_info));
 
 	msm8960_init_pmic();
-	if ((SOCINFO_VERSION_MAJOR(socinfo_get_version()) >= 2 &&
-		(machine_is_msm8960_mtp())) || machine_is_msm8960_liquid())
+	if (machine_is_msm8960_liquid() || (machine_is_msm8960_mtp() &&
+		(socinfo_get_platform_subtype() == PLATFORM_SUBTYPE_SGLTE)))
 		msm_isa1200_board_info[0].platform_data = &isa1200_1_pdata;
 	msm8960_i2c_init();
 	msm8960_gfx_init();
@@ -3064,6 +3164,7 @@
 	msm8960_init_dsps();
 	change_memory_power = &msm8960_change_memory_power;
 	BUG_ON(msm_pm_boot_init(&msm_pm_boot_pdata));
+	bt_power_init();
 	if (socinfo_get_platform_subtype() == PLATFORM_SUBTYPE_SGLTE) {
 		mdm_sglte_device.dev.platform_data = &sglte_platform_data;
 		platform_device_register(&mdm_sglte_device);
diff --git a/arch/arm/mach-msm/board-8974.c b/arch/arm/mach-msm/board-8974.c
index d65ebe1..240e094 100644
--- a/arch/arm/mach-msm/board-8974.c
+++ b/arch/arm/mach-msm/board-8974.c
@@ -20,9 +20,6 @@
 #include <linux/of_address.h>
 #include <linux/of_platform.h>
 #include <linux/of_irq.h>
-#ifdef CONFIG_ION_MSM
-#include <linux/ion.h>
-#endif
 #include <linux/memory.h>
 #ifdef CONFIG_ANDROID_PMEM
 #include <linux/android_pmem.h>
@@ -52,17 +49,6 @@
 #include "lpm_resources.h"
 
 #define MSM_KERNEL_EBI1_MEM_SIZE	0x280000
-#ifdef CONFIG_FB_MSM_HDMI_AS_PRIMARY
-#define MSM_ION_SF_SIZE 0x4000000 /* 64 Mbytes */
-#else
-#define MSM_ION_SF_SIZE 0x2800000 /* 40 Mbytes */
-#endif
-#define MSM_ION_MM_FW_SIZE	0xa00000 /* (10MB) */
-#define MSM_ION_MM_SIZE		0x7800000 /* (120MB) */
-#define MSM_ION_QSECOM_SIZE	0x600000 /* (6MB) */
-#define MSM_ION_MFC_SIZE	SZ_8K
-#define MSM_ION_AUDIO_SIZE	0x2B4000
-#define MSM_ION_HEAP_NUM	8
 
 #ifdef CONFIG_KERNEL_PMEM_EBI_REGION
 static unsigned kernel_ebi1_mem_size = MSM_KERNEL_EBI1_MEM_SIZE;
@@ -90,121 +76,12 @@
 	return MEMTYPE_EBI1;
 }
 
-#ifdef CONFIG_ION_MSM
-static struct ion_cp_heap_pdata cp_mm_ion_pdata = {
-	.permission_type = IPT_TYPE_MM_CARVEOUT,
-	.align = PAGE_SIZE,
-};
-
-static struct ion_cp_heap_pdata cp_mfc_ion_pdata = {
-	.permission_type = IPT_TYPE_MFC_SHAREDMEM,
-	.align = PAGE_SIZE,
-};
-
-static struct ion_co_heap_pdata co_ion_pdata = {
-	.adjacent_mem_id = INVALID_HEAP_ID,
-	.align = PAGE_SIZE,
-};
-
-static struct ion_co_heap_pdata fw_co_ion_pdata = {
-	.adjacent_mem_id = ION_CP_MM_HEAP_ID,
-	.align = SZ_128K,
-};
-
-/**
- * These heaps are listed in the order they will be allocated. Due to
- * video hardware restrictions and content protection the FW heap has to
- * be allocated adjacent (below) the MM heap and the MFC heap has to be
- * allocated after the MM heap to ensure MFC heap is not more than 256MB
- * away from the base address of the FW heap.
- * However, the order of FW heap and MM heap doesn't matter since these
- * two heaps are taken care of by separate code to ensure they are adjacent
- * to each other.
- * Don't swap the order unless you know what you are doing!
- */
-static struct ion_platform_data ion_pdata = {
-	.nr = MSM_ION_HEAP_NUM,
-	.heaps = {
-		{
-			.id	= ION_SYSTEM_HEAP_ID,
-			.type	= ION_HEAP_TYPE_SYSTEM,
-			.name	= ION_VMALLOC_HEAP_NAME,
-		},
-		{
-			.id	= ION_CP_MM_HEAP_ID,
-			.type	= ION_HEAP_TYPE_CP,
-			.name	= ION_MM_HEAP_NAME,
-			.size	= MSM_ION_MM_SIZE,
-			.memory_type = ION_EBI_TYPE,
-			.extra_data = (void *) &cp_mm_ion_pdata,
-		},
-		{
-			.id	= ION_MM_FIRMWARE_HEAP_ID,
-			.type	= ION_HEAP_TYPE_CARVEOUT,
-			.name	= ION_MM_FIRMWARE_HEAP_NAME,
-			.size	= MSM_ION_MM_FW_SIZE,
-			.memory_type = ION_EBI_TYPE,
-			.extra_data = (void *) &fw_co_ion_pdata,
-		},
-		{
-			.id	= ION_CP_MFC_HEAP_ID,
-			.type	= ION_HEAP_TYPE_CP,
-			.name	= ION_MFC_HEAP_NAME,
-			.size	= MSM_ION_MFC_SIZE,
-			.memory_type = ION_EBI_TYPE,
-			.extra_data = (void *) &cp_mfc_ion_pdata,
-		},
-		{
-			.id	= ION_SF_HEAP_ID,
-			.type	= ION_HEAP_TYPE_CARVEOUT,
-			.name	= ION_SF_HEAP_NAME,
-			.size	= MSM_ION_SF_SIZE,
-			.memory_type = ION_EBI_TYPE,
-			.extra_data = (void *) &co_ion_pdata,
-		},
-		{
-			.id	= ION_IOMMU_HEAP_ID,
-			.type	= ION_HEAP_TYPE_IOMMU,
-			.name	= ION_IOMMU_HEAP_NAME,
-		},
-		{
-			.id	= ION_QSECOM_HEAP_ID,
-			.type	= ION_HEAP_TYPE_CARVEOUT,
-			.name	= ION_QSECOM_HEAP_NAME,
-			.size	= MSM_ION_QSECOM_SIZE,
-			.memory_type = ION_EBI_TYPE,
-			.extra_data = (void *) &co_ion_pdata,
-		},
-		{
-			.id	= ION_AUDIO_HEAP_ID,
-			.type	= ION_HEAP_TYPE_CARVEOUT,
-			.name	= ION_AUDIO_HEAP_NAME,
-			.size	= MSM_ION_AUDIO_SIZE,
-			.memory_type = ION_EBI_TYPE,
-			.extra_data = (void *) &co_ion_pdata,
-		},
-	}
-};
-
-static struct platform_device ion_dev = {
-	.name = "ion-msm",
-	.id = 1,
-	.dev = { .platform_data = &ion_pdata },
-};
-
-static void __init reserve_ion_memory(void)
+static void __init reserve_ebi_memory(void)
 {
-	msm_8974_reserve_table[MEMTYPE_EBI1].size += MSM_ION_MM_SIZE;
-	msm_8974_reserve_table[MEMTYPE_EBI1].size += MSM_ION_MM_FW_SIZE;
-	msm_8974_reserve_table[MEMTYPE_EBI1].size += MSM_ION_SF_SIZE;
-	msm_8974_reserve_table[MEMTYPE_EBI1].size += MSM_ION_MFC_SIZE;
-	msm_8974_reserve_table[MEMTYPE_EBI1].size += MSM_ION_QSECOM_SIZE;
-	msm_8974_reserve_table[MEMTYPE_EBI1].size += MSM_ION_AUDIO_SIZE;
 #ifdef CONFIG_KERNEL_PMEM_EBI_REGION
 	msm_8974_reserve_table[MEMTYPE_EBI1].size += kernel_ebi1_mem_size;
 #endif
 }
-#endif
 
 static struct resource smd_resource[] = {
 	{
@@ -370,9 +247,7 @@
 
 static void __init msm_8974_calculate_reserve_sizes(void)
 {
-#ifdef CONFIG_ION_MSM
-	reserve_ion_memory();
-#endif
+	reserve_ebi_memory();
 }
 
 static struct reserve_info msm_8974_reserve_info __initdata = {
@@ -535,49 +410,12 @@
 
 void __init msm_8974_add_devices(void)
 {
-#ifdef CONFIG_ION_MSM
-	platform_device_register(&ion_dev);
-#endif
 	platform_device_register(&msm_device_smd_8974);
 	platform_device_register(&android_usb_device);
 	platform_add_devices(msm_8974_stub_regulator_devices,
 					msm_8974_stub_regulator_devices_len);
 }
 
-static struct clk_lookup msm_clocks_dummy[] = {
-	CLK_DUMMY("xo",		XO_CLK,		NULL,	OFF),
-	CLK_DUMMY("xo",		XO_CLK,		"pil_pronto",		OFF),
-	CLK_DUMMY("core_clk",	BLSP2_UART_CLK,	"msm_serial_hsl.0",	OFF),
-	CLK_DUMMY("iface_clk",	BLSP2_UART_CLK,	"msm_serial_hsl.0",	OFF),
-	CLK_DUMMY("core_clk",	SDC1_CLK,	NULL,			OFF),
-	CLK_DUMMY("iface_clk",	SDC1_P_CLK,	NULL,			OFF),
-	CLK_DUMMY("core_clk",	SDC3_CLK,	NULL,			OFF),
-	CLK_DUMMY("iface_clk",	SDC3_P_CLK,	NULL,			OFF),
-	CLK_DUMMY("phy_clk", NULL, "msm_otg", OFF),
-	CLK_DUMMY("core_clk", NULL, "msm_otg", OFF),
-	CLK_DUMMY("iface_clk", NULL, "msm_otg", OFF),
-	CLK_DUMMY("xo", NULL, "msm_otg", OFF),
-	CLK_DUMMY("dfab_clk",	DFAB_CLK,	NULL, 0),
-	CLK_DUMMY("dma_bam_pclk",	DMA_BAM_P_CLK,	NULL, 0),
-	CLK_DUMMY("mem_clk",	NULL,	NULL, 0),
-	CLK_DUMMY("core_clk",	SPI_CLK,	"spi_qsd.1",	OFF),
-	CLK_DUMMY("iface_clk",	SPI_P_CLK,	"spi_qsd.1",	OFF),
-	CLK_DUMMY("core_clk",	NULL,	"f9966000.i2c", 0),
-	CLK_DUMMY("iface_clk",	NULL,	"f9966000.i2c", 0),
-	CLK_DUMMY("core_clk",	NULL,	"fe12f000.slim",	OFF),
-	CLK_DUMMY("core_clk", "mdp.0", NULL, 0),
-	CLK_DUMMY("core_clk_src", "mdp.0", NULL, 0),
-	CLK_DUMMY("lut_clk", "mdp.0", NULL, 0),
-	CLK_DUMMY("vsync_clk", "mdp.0", NULL, 0),
-	CLK_DUMMY("iface_clk", "mdp.0", NULL, 0),
-	CLK_DUMMY("bus_clk", "mdp.0", NULL, 0),
-};
-
-struct clock_init_data msm_dummy_clock_init_data __initdata = {
-	.table = msm_clocks_dummy,
-	.size = ARRAY_SIZE(msm_clocks_dummy),
-};
-
 /*
  * Used to satisfy dependencies for devices that need to be
  * run early or in a particular order. Most likely your device doesn't fall
@@ -594,7 +432,7 @@
 	msm_spm_device_init();
 	regulator_stub_init();
 	if (machine_is_msm8974_rumi())
-		msm_clock_init(&msm_dummy_clock_init_data);
+		msm_clock_init(&msm8974_rumi_clock_init_data);
 	else
 		msm_clock_init(&msm8974_clock_init_data);
 	msm8974_init_buses();
diff --git a/arch/arm/mach-msm/board-msm7627a-display.c b/arch/arm/mach-msm/board-msm7627a-display.c
index 3726941..e305fe6 100644
--- a/arch/arm/mach-msm/board-msm7627a-display.c
+++ b/arch/arm/mach-msm/board-msm7627a-display.c
@@ -81,6 +81,7 @@
 	"gpio_disp_reset",
 };
 
+static char lcdc_splash_is_enabled(void);
 static int lcdc_truly_gpio_init(void)
 {
 	int i;
@@ -103,7 +104,12 @@
 					lcdc_truly_gpio_table[i]);
 				goto truly_gpio_fail;
 			}
-			rc = gpio_direction_output(lcdc_truly_gpio_table[i], 0);
+			if (lcdc_splash_is_enabled())
+				rc = gpio_direction_output(
+					lcdc_truly_gpio_table[i], 1);
+			else
+				rc = gpio_direction_output(
+					lcdc_truly_gpio_table[i], 0);
 			if (rc < 0) {
 				pr_err("Error direct lcdc gpio:%d\n",
 					lcdc_truly_gpio_table[i]);
@@ -247,6 +253,7 @@
 static int sku3_lcdc_power_save(int on)
 {
 	int rc = 0;
+	static int cont_splash_done;
 
 	if (on) {
 		sku3_lcdc_lcd_camera_power_onoff(1);
@@ -257,6 +264,11 @@
 			return rc;
 		}
 
+		if (lcdc_splash_is_enabled() && !cont_splash_done) {
+			cont_splash_done = 1;
+			return rc;
+		}
+
 		if (lcdc_truly_gpio_initialized) {
 			/*LCD reset*/
 			gpio_set_value(SKU3_LCDC_GPIO_DISPLAY_RESET, 1);
@@ -778,8 +790,14 @@
 static struct msm_panel_common_pdata mdp_pdata = {
 	.gpio = 97,
 	.mdp_rev = MDP_REV_303,
+	.cont_splash_enabled = 0x1,
 };
 
+static char lcdc_splash_is_enabled(void)
+{
+	return mdp_pdata.cont_splash_enabled;
+}
+
 #define GPIO_LCDC_BRDG_PD	128
 #define GPIO_LCDC_BRDG_RESET_N	129
 #define GPIO_LCD_DSI_SEL	125
@@ -1142,11 +1160,11 @@
 }
 
 static int qrd3_dsi_gpio_initialized;
+static struct regulator *gpio_reg_2p85v, *gpio_reg_1p8v;
 
 static int mipi_dsi_panel_qrd3_power(int on)
 {
 	int rc = 0;
-	static struct regulator *gpio_reg_2p85v, *gpio_reg_1p8v;
 
 	if (!qrd3_dsi_gpio_initialized) {
 		pmapp_disp_backlight_init();
@@ -1155,21 +1173,42 @@
 		if (rc < 0)
 			return rc;
 
-		gpio_reg_2p85v = regulator_get(&mipi_dsi_device.dev,
-								"lcd_vdd");
-		if (IS_ERR(gpio_reg_2p85v)) {
-			pr_err("%s:ext_2p85v regulator get failed", __func__);
-			return -EINVAL;
-		}
-
-		gpio_reg_1p8v = regulator_get(&mipi_dsi_device.dev,
-								"lcd_vddi");
-		if (IS_ERR(gpio_reg_1p8v)) {
-			pr_err("%s:ext_1p8v regulator get failed", __func__);
-			return -EINVAL;
-		}
-
 		qrd3_dsi_gpio_initialized = 1;
+
+		if (mdp_pdata.cont_splash_enabled) {
+			rc = gpio_tlmm_config(GPIO_CFG(
+			     GPIO_QRD3_LCD_BACKLIGHT_EN, 0, GPIO_CFG_OUTPUT,
+			     GPIO_CFG_PULL_UP, GPIO_CFG_2MA), GPIO_CFG_ENABLE);
+			if (rc < 0) {
+				pr_err("failed QRD3 GPIO_BACKLIGHT_EN tlmm config\n");
+				return rc;
+			}
+			rc = gpio_direction_output(GPIO_QRD3_LCD_BACKLIGHT_EN,
+			     1);
+			if (rc < 0) {
+				pr_err("failed to enable backlight\n");
+				gpio_free(GPIO_QRD3_LCD_BACKLIGHT_EN);
+				return rc;
+			}
+
+			/*Configure LCD Bridge reset*/
+			rc = gpio_tlmm_config(qrd3_mipi_dsi_gpio[0],
+			     GPIO_CFG_ENABLE);
+			if (rc < 0) {
+				pr_err("Failed to enable LCD Bridge reset\n");
+				return rc;
+			}
+
+			rc = gpio_direction_output(GPIO_QRD3_LCD_BRDG_RESET_N,
+			     1);
+
+			if (rc < 0) {
+				pr_err("Failed GPIO bridge Reset\n");
+				gpio_free(GPIO_QRD3_LCD_BRDG_RESET_N);
+				return rc;
+			}
+			return 0;
+		}
 	}
 
 	if (on) {
@@ -1246,6 +1285,7 @@
 	return rc;
 }
 
+static char mipi_dsi_splash_is_enabled(void);
 static int mipi_dsi_panel_power(int on)
 {
 	int rc = 0;
@@ -1268,9 +1308,15 @@
 	.dsi_power_save		= mipi_dsi_panel_power,
 	.dsi_client_reset       = msm_fb_dsi_client_reset,
 	.get_lane_config	= msm_fb_get_lane_config,
+	.splash_is_enabled	= mipi_dsi_splash_is_enabled,
 };
 #endif
 
+static char mipi_dsi_splash_is_enabled(void)
+{
+	return mdp_pdata.cont_splash_enabled;
+}
+
 static char prim_panel_name[PANEL_NAME_MAX_LEN];
 static int __init prim_display_setup(char *param)
 {
@@ -1280,6 +1326,8 @@
 }
 early_param("prim_display", prim_display_setup);
 
+static int disable_splash;
+
 void msm7x27a_set_display_params(char *prim_panel)
 {
 	if (strnlen(prim_panel, PANEL_NAME_MAX_LEN)) {
@@ -1288,10 +1336,22 @@
 		pr_debug("msm_fb_pdata.prim_panel_name %s\n",
 			msm_fb_pdata.prim_panel_name);
 	}
+	if (strnlen(msm_fb_pdata.prim_panel_name, PANEL_NAME_MAX_LEN)) {
+		if (strncmp((char *)msm_fb_pdata.prim_panel_name,
+			"mipi_cmd_nt35510_wvga",
+			strnlen("mipi_cmd_nt35510_wvga",
+				PANEL_NAME_MAX_LEN)) &&
+		    strncmp((char *)msm_fb_pdata.prim_panel_name,
+			"mipi_video_nt35510_wvga",
+			strnlen("mipi_video_nt35510_wvga",
+				PANEL_NAME_MAX_LEN)))
+			disable_splash = 1;
+	}
 }
 
 void __init msm_fb_add_devices(void)
 {
+	int rc = 0;
 	msm7x27a_set_display_params(prim_panel_name);
 	if (machine_is_msm7627a_qrd1())
 		platform_add_devices(qrd_fb_devices,
@@ -1300,15 +1360,22 @@
 						|| machine_is_msm8625_evt()) {
 		mipi_NT35510_pdata.bl_lock = 1;
 		mipi_NT35516_pdata.bl_lock = 1;
+		if (disable_splash)
+			mdp_pdata.cont_splash_enabled = 0x0;
+
+
 		platform_add_devices(evb_fb_devices,
 				ARRAY_SIZE(evb_fb_devices));
 	} else if (machine_is_msm7627a_qrd3() || machine_is_msm8625_qrd7()) {
 		sku3_lcdc_lcd_camera_power_init();
+		mdp_pdata.cont_splash_enabled = 0x1;
 		platform_add_devices(qrd3_fb_devices,
 						ARRAY_SIZE(qrd3_fb_devices));
-	} else
+	} else {
+		mdp_pdata.cont_splash_enabled = 0x0;
 		platform_add_devices(msm_fb_devices,
 				ARRAY_SIZE(msm_fb_devices));
+	}
 
 	msm_fb_register_device("mdp", &mdp_pdata);
 	if (machine_is_msm7625a_surf() || machine_is_msm7x27a_surf() ||
@@ -1318,4 +1385,26 @@
 #ifdef CONFIG_FB_MSM_MIPI_DSI
 	msm_fb_register_device("mipi_dsi", &mipi_dsi_pdata);
 #endif
+	if (machine_is_msm7627a_evb() || machine_is_msm8625_evb()
+					|| machine_is_msm8625_evt()) {
+		gpio_reg_2p85v = regulator_get(&mipi_dsi_device.dev,
+								"lcd_vdd");
+		if (IS_ERR(gpio_reg_2p85v))
+			pr_err("%s:ext_2p85v regulator get failed", __func__);
+
+		gpio_reg_1p8v = regulator_get(&mipi_dsi_device.dev,
+								"lcd_vddi");
+		if (IS_ERR(gpio_reg_1p8v))
+			pr_err("%s:ext_1p8v regulator get failed", __func__);
+
+		if (mdp_pdata.cont_splash_enabled) {
+			/*Enable EXT_2.85 and 1.8 regulators*/
+			rc = regulator_enable(gpio_reg_2p85v);
+			if (rc < 0)
+				pr_err("%s: reg enable failed\n", __func__);
+			rc = regulator_enable(gpio_reg_1p8v);
+			if (rc < 0)
+				pr_err("%s: reg enable failed\n", __func__);
+		}
+	}
 }
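
The display changes above all serve the continuous-splash handoff: when the bootloader has already lit the panel, the first power-on call must not re-run the reset/regulator sequence (which would blank the splash image), so a one-shot flag swallows that first call. A small stand-alone sketch of the pattern (illustrative only, not the msm_fb/lcdc API):

#include <stdio.h>

static int cont_splash_enabled = 1; /* bootloader left the panel on */

/* The first power-on after boot must not reset a panel the bootloader
 * already lit; later calls go through the full sequence. Mirrors the
 * cont_splash_done logic above in spirit only. */
static int lcdc_power_save(int on)
{
	static int cont_splash_done;

	if (on) {
		if (cont_splash_enabled && !cont_splash_done) {
			cont_splash_done = 1;
			printf("splash handoff: skip panel reset\n");
			return 0;
		}
		printf("full power-on: toggle reset, enable backlight\n");
	} else {
		printf("power off panel\n");
	}
	return 0;
}

int main(void)
{
	lcdc_power_save(1); /* handoff from bootloader splash */
	lcdc_power_save(0);
	lcdc_power_save(1); /* normal resume path */
	return 0;
}
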
diff --git a/arch/arm/mach-msm/clock-8960.c b/arch/arm/mach-msm/clock-8960.c
index 56b774d..1827773 100644
--- a/arch/arm/mach-msm/clock-8960.c
+++ b/arch/arm/mach-msm/clock-8960.c
@@ -3200,12 +3200,36 @@
 	F_END
 };
 
+static struct branch_clk dsi1_reset_clk = {
+	.b = {
+		.reset_reg = SW_RESET_CORE_REG,
+		.reset_mask = BIT(7),
+		.halt_check = NOCHECK,
+	},
+	.c = {
+		.dbg_name = "dsi1_reset_clk",
+		.ops = &clk_ops_branch,
+		CLK_INIT(dsi1_reset_clk.c),
+	},
+};
+
+static struct branch_clk dsi2_reset_clk = {
+	.b = {
+		.reset_reg = SW_RESET_CORE_REG,
+		.reset_mask = BIT(25),
+		.halt_check = NOCHECK,
+	},
+	.c = {
+		.dbg_name = "dsi2_reset_clk",
+		.ops = &clk_ops_branch,
+		CLK_INIT(dsi2_reset_clk.c),
+	},
+};
+
 static struct rcg_clk dsi1_byte_clk = {
 	.b = {
 		.ctl_reg = DSI1_BYTE_CC_REG,
 		.en_mask = BIT(0),
-		.reset_reg = SW_RESET_CORE_REG,
-		.reset_mask = BIT(7),
 		.halt_reg = DBG_BUS_VEC_B_REG,
 		.halt_bit = 21,
 		.retain_reg = DSI1_BYTE_CC_REG,
@@ -3228,8 +3252,6 @@
 	.b = {
 		.ctl_reg = DSI2_BYTE_CC_REG,
 		.en_mask = BIT(0),
-		.reset_reg = SW_RESET_CORE_REG,
-		.reset_mask = BIT(25),
 		.halt_reg = DBG_BUS_VEC_B_REG,
 		.halt_bit = 20,
 		.retain_reg = DSI2_BYTE_CC_REG,
@@ -3252,7 +3274,6 @@
 	.b = {
 		.ctl_reg = DSI1_ESC_CC_REG,
 		.en_mask = BIT(0),
-		.reset_reg = SW_RESET_CORE_REG,
 		.halt_reg = DBG_BUS_VEC_I_REG,
 		.halt_bit = 1,
 	},
@@ -4543,6 +4564,7 @@
 	}
 static struct clk_freq_tbl clk_tbl_pcm_492[] = {
 	{ .ns_val = BIT(10) /* external input */ },
+	F_PCM(  256000, pll4, 4, 1, 480),
 	F_PCM(  512000, pll4, 4, 1, 240),
 	F_PCM(  768000, pll4, 4, 1, 160),
 	F_PCM( 1024000, pll4, 4, 1, 120),
@@ -4559,6 +4581,7 @@
 
 static struct clk_freq_tbl clk_tbl_pcm_393[] = {
 	{ .ns_val = BIT(10) /* external input */ },
+	F_PCM(  256000, pll4, 4, 1, 384),
 	F_PCM(  512000, pll4, 4, 1, 192),
 	F_PCM(  768000, pll4, 4, 1, 128),
 	F_PCM( 1024000, pll4, 4, 1,  96),
@@ -5433,6 +5456,9 @@
 	CLK_LOOKUP("mem_clk",		ebi1_acpu_a_clk.c, ""),
 	CLK_LOOKUP("bus_clk",		afab_acpu_a_clk.c, ""),
 
+	CLK_LOOKUP("reset1_clk",	dsi1_reset_clk.c, "footswitch-8x60.4"),
+	CLK_LOOKUP("reset2_clk",	dsi2_reset_clk.c, "footswitch-8x60.4"),
+
 	CLK_LOOKUP("l2_mclk",		l2_m_clk,     ""),
 	CLK_LOOKUP("krait0_mclk",	krait0_m_clk, ""),
 	CLK_LOOKUP("krait1_mclk",	krait1_m_clk, ""),
@@ -5750,6 +5776,9 @@
 	CLK_LOOKUP("mem_clk",		ebi1_acpu_a_clk.c, ""),
 	CLK_LOOKUP("bus_clk",		afab_acpu_a_clk.c, ""),
 
+	CLK_LOOKUP("reset1_clk",	dsi1_reset_clk.c, "footswitch-8x60.4"),
+	CLK_LOOKUP("reset2_clk",	dsi2_reset_clk.c, "footswitch-8x60.4"),
+
 	CLK_LOOKUP("l2_mclk",		l2_m_clk,     ""),
 	CLK_LOOKUP("krait0_mclk",	krait0_m_clk, ""),
 	CLK_LOOKUP("krait1_mclk",	krait1_m_clk, ""),
@@ -6075,6 +6104,8 @@
 	CLK_LOOKUP("mem_clk",		ebi1_acpu_a_clk.c, ""),
 	CLK_LOOKUP("bus_clk",		afab_acpu_a_clk.c, ""),
 
+	CLK_LOOKUP("reset1_clk",	dsi1_reset_clk.c, "footswitch-8x60.4"),
+
 	CLK_LOOKUP("l2_mclk",		l2_m_clk,     ""),
 	CLK_LOOKUP("krait0_mclk",	krait0_m_clk, ""),
 	CLK_LOOKUP("krait1_mclk",	krait1_m_clk, ""),
diff --git a/arch/arm/mach-msm/clock-8974.c b/arch/arm/mach-msm/clock-8974.c
index 82b7fd6..7dd3829 100644
--- a/arch/arm/mach-msm/clock-8974.c
+++ b/arch/arm/mach-msm/clock-8974.c
@@ -21,6 +21,7 @@
 
 #include <mach/clk.h>
 #include <mach/rpm-regulator-smd.h>
+#include <mach/socinfo.h>
 
 #include "clock-local2.h"
 #include "clock-pll.h"
@@ -759,6 +760,9 @@
 static DEFINE_CLK_VOTER(pnoc_sdcc3_clk, &pnoc_clk.c, 0);
 static DEFINE_CLK_VOTER(pnoc_sdcc4_clk, &pnoc_clk.c, 0);
 
+static DEFINE_CLK_VOTER(pnoc_sps_clk, &pnoc_clk.c, 0);
+static DEFINE_CLK_VOTER(pnoc_qseecom_clk, &pnoc_clk.c, 0);
+
 static struct clk_freq_tbl ftbl_gcc_usb30_master_clk[] = {
 	F(125000000,  gpll0,   1,   5,  24),
 	F_END
@@ -1270,6 +1274,12 @@
 	F_END
 };
 
+static struct clk_freq_tbl ftbl_gcc_sdcc_apps_rumi_clk[] = {
+	F(   400000,    cxo,  12,   1,   4),
+	F( 19200000,    cxo,  1,    0,   0),
+	F_END
+};
+
 static struct rcg_clk sdcc1_apps_clk_src = {
 	.cmd_rcgr_reg = SDCC1_APPS_CMD_RCGR,
 	.set_rate = set_rate_mnd,
@@ -2274,7 +2284,7 @@
 	F_MM( 19200000,    cxo,     1,   0,   0),
 	F_MM(150000000,  gpll0,     4,   0,   0),
 	F_MM(282000000, mmpll1,     3,   0,   0),
-	F_MM(320000000, mmpll1,   2.5,   0,   0),
+	F_MM(320000000, mmpll0,   2.5,   0,   0),
 	F_MM(400000000, mmpll0,     2,   0,   0),
 	F_END
 };
@@ -4346,72 +4356,76 @@
 };
 
 struct measure_mux_entry measure_mux[] = {
-	{&gcc_bam_dma_ahb_clk.c,		GCC_BASE, 0x00e8},
-	{&gcc_blsp1_ahb_clk.c,			GCC_BASE, 0x0090},
-	{&gcc_blsp1_qup1_i2c_apps_clk.c,	GCC_BASE, 0x0093},
-	{&gcc_blsp1_qup1_spi_apps_clk.c,	GCC_BASE, 0x0092},
-	{&gcc_blsp1_qup2_i2c_apps_clk.c,	GCC_BASE, 0x0098},
-	{&gcc_blsp1_qup2_spi_apps_clk.c,	GCC_BASE, 0x0096},
-	{&gcc_blsp1_qup3_i2c_apps_clk.c,	GCC_BASE, 0x009c},
-	{&gcc_blsp1_qup3_spi_apps_clk.c,	GCC_BASE, 0x009b},
-	{&gcc_blsp1_qup4_i2c_apps_clk.c,	GCC_BASE, 0x00a1},
-	{&gcc_blsp1_qup4_spi_apps_clk.c,	GCC_BASE, 0x00a0},
-	{&gcc_blsp1_qup5_i2c_apps_clk.c,	GCC_BASE, 0x00a5},
-	{&gcc_blsp1_qup5_spi_apps_clk.c,	GCC_BASE, 0x00a4},
-	{&gcc_blsp1_qup6_i2c_apps_clk.c,	GCC_BASE, 0x00aa},
-	{&gcc_blsp1_qup6_spi_apps_clk.c,	GCC_BASE, 0x00a9},
-	{&gcc_blsp1_uart1_apps_clk.c,		GCC_BASE, 0x0094},
-	{&gcc_blsp1_uart2_apps_clk.c,		GCC_BASE, 0x0099},
-	{&gcc_blsp1_uart3_apps_clk.c,		GCC_BASE, 0x009d},
-	{&gcc_blsp1_uart4_apps_clk.c,		GCC_BASE, 0x00a2},
-	{&gcc_blsp1_uart5_apps_clk.c,		GCC_BASE, 0x00a6},
-	{&gcc_blsp1_uart6_apps_clk.c,		GCC_BASE, 0x00ab},
-	{&gcc_blsp2_ahb_clk.c,			GCC_BASE, 0x00b0},
-	{&gcc_blsp2_qup1_i2c_apps_clk.c,	GCC_BASE, 0x00b3},
-	{&gcc_blsp2_qup1_spi_apps_clk.c,	GCC_BASE, 0x00b2},
-	{&gcc_blsp2_qup2_i2c_apps_clk.c,	GCC_BASE, 0x00b8},
-	{&gcc_blsp2_qup2_spi_apps_clk.c,	GCC_BASE, 0x00b6},
-	{&gcc_blsp2_qup3_i2c_apps_clk.c,	GCC_BASE, 0x00bc},
-	{&gcc_blsp2_qup3_spi_apps_clk.c,	GCC_BASE, 0x00bb},
-	{&gcc_blsp2_qup4_i2c_apps_clk.c,	GCC_BASE, 0x00c1},
-	{&gcc_blsp2_qup4_spi_apps_clk.c,	GCC_BASE, 0x00c0},
-	{&gcc_blsp2_qup5_i2c_apps_clk.c,	GCC_BASE, 0x00c5},
-	{&gcc_blsp2_qup5_spi_apps_clk.c,	GCC_BASE, 0x00c4},
-	{&gcc_blsp2_qup6_i2c_apps_clk.c,	GCC_BASE, 0x00ca},
-	{&gcc_blsp2_qup6_spi_apps_clk.c,	GCC_BASE, 0x00c9},
-	{&gcc_blsp2_uart1_apps_clk.c,		GCC_BASE, 0x00b4},
-	{&gcc_blsp2_uart2_apps_clk.c,		GCC_BASE, 0x00b9},
-	{&gcc_blsp2_uart3_apps_clk.c,		GCC_BASE, 0x00bd},
-	{&gcc_blsp2_uart4_apps_clk.c,		GCC_BASE, 0x00c2},
-	{&gcc_blsp2_uart5_apps_clk.c,		GCC_BASE, 0x00c6},
-	{&gcc_blsp2_uart6_apps_clk.c,		GCC_BASE, 0x00cb},
-	{&gcc_boot_rom_ahb_clk.c,		GCC_BASE, 0x0100},
-	{&gcc_ocmem_noc_cfg_ahb_clk.c,		GCC_BASE, 0x0029},
-	{&gcc_mmss_noc_cfg_ahb_clk.c,		GCC_BASE, 0x002A},
-	{&gcc_mss_cfg_ahb_clk.c,		GCC_BASE, 0x0030},
-	{&gcc_ce1_clk.c,			GCC_BASE, 0x0140},
-	{&gcc_ce2_clk.c,			GCC_BASE, 0x0148},
-	{&gcc_pdm2_clk.c,			GCC_BASE, 0x00da},
-	{&gcc_pdm_ahb_clk.c,			GCC_BASE, 0x00d8},
-	{&gcc_prng_ahb_clk.c,			GCC_BASE, 0x00e0},
-	{&gcc_sdcc1_ahb_clk.c,			GCC_BASE, 0x0071},
-	{&gcc_sdcc1_apps_clk.c,			GCC_BASE, 0x0070},
-	{&gcc_sdcc2_ahb_clk.c,			GCC_BASE, 0x0079},
-	{&gcc_sdcc2_apps_clk.c,			GCC_BASE, 0x0078},
-	{&gcc_sdcc3_ahb_clk.c,			GCC_BASE, 0x0081},
-	{&gcc_sdcc3_apps_clk.c,			GCC_BASE, 0x0080},
-	{&gcc_sdcc4_ahb_clk.c,			GCC_BASE, 0x0089},
-	{&gcc_sdcc4_apps_clk.c,			GCC_BASE, 0x0088},
-	{&gcc_tsif_ahb_clk.c,			GCC_BASE, 0x00f0},
-	{&gcc_tsif_ref_clk.c,			GCC_BASE, 0x00f1},
+	{&gcc_pdm_ahb_clk.c,			GCC_BASE, 0x00d0},
+	{&gcc_blsp2_qup1_i2c_apps_clk.c,	GCC_BASE, 0x00ab},
+	{&gcc_blsp2_qup3_spi_apps_clk.c,	GCC_BASE, 0x00b3},
+	{&gcc_blsp2_uart5_apps_clk.c,		GCC_BASE, 0x00be},
 	{&gcc_usb30_master_clk.c,		GCC_BASE, 0x0050},
+	{&gcc_blsp2_qup3_i2c_apps_clk.c,	GCC_BASE, 0x00b4},
+	{&gcc_usb_hsic_system_clk.c,		GCC_BASE, 0x0059},
+	{&gcc_blsp2_uart3_apps_clk.c,		GCC_BASE, 0x00b5},
+	{&gcc_usb_hsic_io_cal_clk.c,		GCC_BASE, 0x005b},
+	{&gcc_ce2_axi_clk.c,			GCC_BASE, 0x0141},
+	{&gcc_sdcc3_ahb_clk.c,			GCC_BASE, 0x0079},
+	{&gcc_blsp1_qup5_i2c_apps_clk.c,	GCC_BASE, 0x009d},
+	{&gcc_blsp1_qup1_spi_apps_clk.c,	GCC_BASE, 0x008a},
+	{&gcc_blsp2_uart4_apps_clk.c,		GCC_BASE, 0x00ba},
+	{&gcc_ce2_clk.c,			GCC_BASE, 0x0140},
+	{&gcc_blsp1_uart2_apps_clk.c,		GCC_BASE, 0x0091},
+	{&gcc_sdcc1_ahb_clk.c,			GCC_BASE, 0x0069},
+	{&gcc_mss_cfg_ahb_clk.c,		GCC_BASE, 0x0030},
+	{&gcc_tsif_ahb_clk.c,			GCC_BASE, 0x00e8},
+	{&gcc_sdcc4_ahb_clk.c,			GCC_BASE, 0x0081},
+	{&gcc_blsp1_qup4_spi_apps_clk.c,	GCC_BASE, 0x0098},
+	{&gcc_blsp2_qup4_spi_apps_clk.c,	GCC_BASE, 0x00b8},
+	{&gcc_blsp1_qup3_spi_apps_clk.c,	GCC_BASE, 0x0093},
+	{&gcc_blsp1_qup6_i2c_apps_clk.c,	GCC_BASE, 0x00a2},
+	{&gcc_blsp2_qup6_i2c_apps_clk.c,	GCC_BASE, 0x00c2},
+	{&gcc_bam_dma_ahb_clk.c,		GCC_BASE, 0x00e0},
+	{&gcc_sdcc3_apps_clk.c,			GCC_BASE, 0x0078},
+	{&gcc_usb_hs_system_clk.c,		GCC_BASE, 0x0060},
+	{&gcc_blsp1_ahb_clk.c,			GCC_BASE, 0x0088},
+	{&gcc_sdcc1_apps_clk.c,			GCC_BASE, 0x0068},
+	{&gcc_blsp2_qup5_i2c_apps_clk.c,	GCC_BASE, 0x00bd},
+	{&gcc_blsp1_uart4_apps_clk.c,		GCC_BASE, 0x009a},
+	{&gcc_blsp2_qup2_spi_apps_clk.c,	GCC_BASE, 0x00ae},
+	{&gcc_blsp2_qup6_spi_apps_clk.c,	GCC_BASE, 0x00c1},
+	{&gcc_blsp2_uart2_apps_clk.c,		GCC_BASE, 0x00b1},
+	{&gcc_blsp1_qup2_spi_apps_clk.c,	GCC_BASE, 0x008e},
+	{&gcc_usb_hsic_ahb_clk.c,		GCC_BASE, 0x0058},
+	{&gcc_blsp1_uart3_apps_clk.c,		GCC_BASE, 0x0095},
 	{&gcc_usb30_mock_utmi_clk.c,		GCC_BASE, 0x0052},
-	{&gcc_usb_hs_ahb_clk.c,			GCC_BASE, 0x0069},
-	{&gcc_usb_hs_system_clk.c,		GCC_BASE, 0x0068},
-	{&gcc_usb_hsic_ahb_clk.c,		GCC_BASE, 0x0060},
-	{&gcc_usb_hsic_clk.c,			GCC_BASE, 0x0062},
-	{&gcc_usb_hsic_io_cal_clk.c,		GCC_BASE, 0x0063},
-	{&gcc_usb_hsic_system_clk.c,		GCC_BASE, 0x0061},
+	{&gcc_ce1_axi_clk.c,			GCC_BASE, 0x0139},
+	{&gcc_sdcc4_apps_clk.c,			GCC_BASE, 0x0080},
+	{&gcc_blsp1_qup5_spi_apps_clk.c,	GCC_BASE, 0x009c},
+	{&gcc_usb_hs_ahb_clk.c,			GCC_BASE, 0x0061},
+	{&gcc_blsp1_qup6_spi_apps_clk.c,	GCC_BASE, 0x00a1},
+	{&gcc_blsp2_qup2_i2c_apps_clk.c,	GCC_BASE, 0x00b0},
+	{&gcc_prng_ahb_clk.c,			GCC_BASE, 0x00d8},
+	{&gcc_blsp1_qup3_i2c_apps_clk.c,	GCC_BASE, 0x0094},
+	{&gcc_usb_hsic_clk.c,			GCC_BASE, 0x005a},
+	{&gcc_blsp1_uart6_apps_clk.c,		GCC_BASE, 0x00a3},
+	{&gcc_sdcc2_apps_clk.c,			GCC_BASE, 0x0070},
+	{&gcc_tsif_ref_clk.c,			GCC_BASE, 0x00e9},
+	{&gcc_blsp1_uart1_apps_clk.c,		GCC_BASE, 0x008c},
+	{&gcc_blsp2_qup5_spi_apps_clk.c,	GCC_BASE, 0x00bc},
+	{&gcc_blsp1_qup4_i2c_apps_clk.c,	GCC_BASE, 0x0099},
+	{&gcc_mmss_noc_cfg_ahb_clk.c,		GCC_BASE, 0x002a},
+	{&gcc_blsp2_ahb_clk.c,			GCC_BASE, 0x00a8},
+	{&gcc_boot_rom_ahb_clk.c,		GCC_BASE, 0x00f8},
+	{&gcc_ce1_ahb_clk.c,			GCC_BASE, 0x013a},
+	{&gcc_pdm2_clk.c,			GCC_BASE, 0x00d2},
+	{&gcc_blsp2_qup4_i2c_apps_clk.c,	GCC_BASE, 0x00b9},
+	{&gcc_ce2_ahb_clk.c,			GCC_BASE, 0x0142},
+	{&gcc_blsp1_uart5_apps_clk.c,		GCC_BASE, 0x009e},
+	{&gcc_blsp2_qup1_spi_apps_clk.c,	GCC_BASE, 0x00aa},
+	{&gcc_blsp1_qup2_i2c_apps_clk.c,	GCC_BASE, 0x0090},
+	{&gcc_blsp2_uart1_apps_clk.c,		GCC_BASE, 0x00ac},
+	{&gcc_blsp1_qup1_i2c_apps_clk.c,	GCC_BASE, 0x008b},
+	{&gcc_blsp2_uart6_apps_clk.c,		GCC_BASE, 0x00c3},
+	{&gcc_sdcc2_ahb_clk.c,			GCC_BASE, 0x0071},
+	{&gcc_ocmem_noc_cfg_ahb_clk.c,		GCC_BASE, 0x0029},
+	{&gcc_ce1_clk.c,			GCC_BASE, 0x0138},
 	{&mmss_mmssnoc_ahb_clk.c,		MMSS_BASE, 0x0001},
 	{&mmss_mmssnoc_axi_clk.c,		MMSS_BASE, 0x0004},
 	{&ocmemnoc_clk.c,			MMSS_BASE, 0x0007},
@@ -4681,6 +4695,48 @@
 	.multiplier = 1,
 };
 
+
+static struct clk_lookup msm_clocks_8974_rumi[] = {
+	CLK_LOOKUP("iface_clk", gcc_sdcc1_ahb_clk.c, "msm_sdcc.1"),
+	CLK_LOOKUP("core_clk", gcc_sdcc1_apps_clk.c, "msm_sdcc.1"),
+	CLK_LOOKUP("bus_clk", pnoc_sdcc1_clk.c, "msm_sdcc.1"),
+	CLK_LOOKUP("iface_clk", gcc_sdcc2_ahb_clk.c, "msm_sdcc.2"),
+	CLK_LOOKUP("core_clk", gcc_sdcc2_apps_clk.c, "msm_sdcc.2"),
+	CLK_LOOKUP("bus_clk", pnoc_sdcc2_clk.c, "msm_sdcc.2"),
+	CLK_LOOKUP("iface_clk", gcc_sdcc3_ahb_clk.c, "msm_sdcc.3"),
+	CLK_LOOKUP("core_clk", gcc_sdcc3_apps_clk.c, "msm_sdcc.3"),
+	CLK_LOOKUP("bus_clk", pnoc_sdcc3_clk.c, "msm_sdcc.3"),
+	CLK_LOOKUP("iface_clk", gcc_sdcc4_ahb_clk.c, "msm_sdcc.4"),
+	CLK_LOOKUP("core_clk", gcc_sdcc4_apps_clk.c, "msm_sdcc.4"),
+	CLK_LOOKUP("bus_clk", pnoc_sdcc4_clk.c, "msm_sdcc.4"),
+	CLK_DUMMY("xo",		XO_CLK,		NULL,	OFF),
+	CLK_DUMMY("xo",		XO_CLK,		"pil_pronto",		OFF),
+	CLK_DUMMY("core_clk",	BLSP2_UART_CLK,	"msm_serial_hsl.0",	OFF),
+	CLK_DUMMY("iface_clk",	BLSP2_UART_CLK,	"msm_serial_hsl.0",	OFF),
+	CLK_DUMMY("core_clk",	SDC1_CLK,	NULL,			OFF),
+	CLK_DUMMY("iface_clk",	SDC1_P_CLK,	NULL,			OFF),
+	CLK_DUMMY("core_clk",	SDC3_CLK,	NULL,			OFF),
+	CLK_DUMMY("iface_clk",	SDC3_P_CLK,	NULL,			OFF),
+	CLK_DUMMY("phy_clk", NULL, "msm_otg", OFF),
+	CLK_DUMMY("core_clk", NULL, "msm_otg", OFF),
+	CLK_DUMMY("iface_clk", NULL, "msm_otg", OFF),
+	CLK_DUMMY("xo", NULL, "msm_otg", OFF),
+	CLK_DUMMY("dfab_clk",	DFAB_CLK,	NULL, 0),
+	CLK_DUMMY("dma_bam_pclk",	DMA_BAM_P_CLK,	NULL, 0),
+	CLK_DUMMY("mem_clk",	NULL,	NULL, 0),
+	CLK_DUMMY("core_clk",	SPI_CLK,	"spi_qsd.1",	OFF),
+	CLK_DUMMY("iface_clk",	SPI_P_CLK,	"spi_qsd.1",	OFF),
+	CLK_DUMMY("core_clk",	NULL,	"f9966000.i2c", 0),
+	CLK_DUMMY("iface_clk",	NULL,	"f9966000.i2c", 0),
+	CLK_DUMMY("core_clk",	NULL,	"fe12f000.slim",	OFF),
+	CLK_DUMMY("core_clk", "mdp.0", NULL, 0),
+	CLK_DUMMY("core_clk_src", "mdp.0", NULL, 0),
+	CLK_DUMMY("lut_clk", "mdp.0", NULL, 0),
+	CLK_DUMMY("vsync_clk", "mdp.0", NULL, 0),
+	CLK_DUMMY("iface_clk", "mdp.0", NULL, 0),
+	CLK_DUMMY("bus_clk", "mdp.0", NULL, 0),
+};
+
 static struct clk_lookup msm_clocks_8974[] = {
 	CLK_LOOKUP("xo",	cxo_clk_src.c,	"msm_otg"),
 	CLK_LOOKUP("xo",	cxo_clk_src.c,	"pil-q6v5-lpass"),
@@ -4904,14 +4960,19 @@
 	CLK_LOOKUP("osr_clk", audio_core_lpaif_quad_osr_clk.c, ""),
 	CLK_LOOKUP("ebit_clk", audio_core_lpaif_quad_ebit_clk.c, ""),
 	CLK_LOOKUP("ibit_clk", audio_core_lpaif_quad_ibit_clk.c, ""),
-	CLK_LOOKUP("core_clk", audio_core_lpaif_pcm0_clk_src.c, ""),
+	CLK_LOOKUP("core_clk", audio_core_lpaif_pcm0_clk_src.c,
+						"msm-dai-q6.4106"),
 	CLK_LOOKUP("ebit_clk", audio_core_lpaif_pcm0_ebit_clk.c, ""),
+	CLK_LOOKUP("ibit_clk", audio_core_lpaif_pcm0_ibit_clk.c,
+						"msm-dai-q6.4106"),
 	CLK_LOOKUP("ibit_clk", audio_core_lpaif_pcm0_ibit_clk.c, ""),
 	CLK_LOOKUP("core_clk", audio_core_lpaif_pcm1_clk_src.c, ""),
 	CLK_LOOKUP("ebit_clk", audio_core_lpaif_pcm1_ebit_clk.c, ""),
 	CLK_LOOKUP("ibit_clk", audio_core_lpaif_pcm1_ibit_clk.c, ""),
-	CLK_LOOKUP("core_clk_src", audio_core_lpaif_pcmoe_clk_src.c, ""),
-	CLK_LOOKUP("core_clk", audio_core_lpaif_pcmoe_clk.c, ""),
+	CLK_LOOKUP("core_oe_src_clk", audio_core_lpaif_pcmoe_clk_src.c,
+						"msm-dai-q6.4106"),
+	CLK_LOOKUP("core_oe_clk", audio_core_lpaif_pcmoe_clk.c,
+						"msm-dai-q6.4106"),
 
 	CLK_LOOKUP("core_clk",       mss_xo_q6_clk.c, "pil-q6v5-mss"),
 	CLK_LOOKUP("bus_clk",       mss_bus_q6_clk.c, "pil-q6v5-mss"),
@@ -4921,11 +4982,8 @@
 	CLK_LOOKUP("bus_clk",  q6ss_ahb_lfabif_clk.c, "pil-q6v5-lpass"),
 	CLK_LOOKUP("core_clk", gcc_prng_ahb_clk.c, "msm_rng"),
 
-	/* TODO: Remove dummy clocks as soon as they become unnecessary */
-	CLK_DUMMY("dfab_clk",  DFAB_CLK,    "msm_sps", OFF),
-	CLK_DUMMY("mem_clk",       NULL,    "msm_sps", OFF),
-	CLK_DUMMY("bus_clk",       NULL,        "scm", OFF),
-	CLK_DUMMY("bus_clk",       NULL,    "qseecom", OFF),
+	CLK_LOOKUP("dfab_clk", pnoc_sps_clk.c, "msm_sps"),
+	CLK_LOOKUP("bus_clk",  pnoc_qseecom_clk.c, "qseecom"),
 
 	CLK_LOOKUP("bus_clk", snoc_clk.c, ""),
 	CLK_LOOKUP("bus_clk", pnoc_clk.c, ""),
@@ -5265,7 +5323,7 @@
 
 	vdd_dig_reg = rpm_regulator_get(NULL, "vdd_dig");
 	if (IS_ERR(vdd_dig_reg))
-		panic("clock-copper: Unable to get the vdd_dig regulator!");
+		panic("clock-8974: Unable to get the vdd_dig regulator!");
 
 	/*
 	 * TODO: Set a voltage and enable vdd_dig, leaving the voltage high
@@ -5284,6 +5342,32 @@
 	return unvote_vdd_level(&vdd_dig, VDD_DIG_HIGH);
 }
 
+static void __init msm8974_rumi_clock_pre_init(void)
+{
+	virt_bases[GCC_BASE] = ioremap(GCC_CC_PHYS, GCC_CC_SIZE);
+	if (!virt_bases[GCC_BASE])
+		panic("clock-8974: Unable to ioremap GCC memory!");
+
+	/* SDCC clocks are partially emulated in the RUMI */
+	sdcc1_apps_clk_src.freq_tbl = ftbl_gcc_sdcc_apps_rumi_clk;
+	sdcc2_apps_clk_src.freq_tbl = ftbl_gcc_sdcc_apps_rumi_clk;
+	sdcc3_apps_clk_src.freq_tbl = ftbl_gcc_sdcc_apps_rumi_clk;
+	sdcc4_apps_clk_src.freq_tbl = ftbl_gcc_sdcc_apps_rumi_clk;
+
+	vdd_dig_reg = rpm_regulator_get(NULL, "vdd_dig");
+	if (IS_ERR(vdd_dig_reg))
+		panic("clock-8974: Unable to get the vdd_dig regulator!");
+
+	/*
+	 * TODO: Set a voltage and enable vdd_dig, leaving the voltage high
+	 * until late_init. This may not be necessary with clock handoff;
+	 * Investigate this code on a real non-simulator target to determine
+	 * its necessity.
+	 */
+	vote_vdd_level(&vdd_dig, VDD_DIG_HIGH);
+	rpm_regulator_enable(vdd_dig_reg);
+}
+
 struct clock_init_data msm8974_clock_init_data __initdata = {
 	.table = msm_clocks_8974,
 	.size = ARRAY_SIZE(msm_clocks_8974),
@@ -5291,3 +5375,9 @@
 	.post_init = msm8974_clock_post_init,
 	.late_init = msm8974_clock_late_init,
 };
+
+struct clock_init_data msm8974_rumi_clock_init_data __initdata = {
+	.table = msm_clocks_8974_rumi,
+	.size = ARRAY_SIZE(msm_clocks_8974_rumi),
+	.pre_init = msm8974_rumi_clock_pre_init,
+};
diff --git a/arch/arm/mach-msm/clock.h b/arch/arm/mach-msm/clock.h
index df2aa4e..d236e13 100644
--- a/arch/arm/mach-msm/clock.h
+++ b/arch/arm/mach-msm/clock.h
@@ -171,6 +171,7 @@
 extern struct clock_init_data msm8625_dummy_clock_init_data;
 extern struct clock_init_data msm8930_clock_init_data;
 extern struct clock_init_data msm8974_clock_init_data;
+extern struct clock_init_data msm8974_rumi_clock_init_data;
 
 void msm_clock_init(struct clock_init_data *data);
 int vote_vdd_level(struct clk_vdd_class *vdd_class, int level);
diff --git a/arch/arm/mach-msm/devices-8064.c b/arch/arm/mach-msm/devices-8064.c
index a6f18ba..1f0bd2c 100644
--- a/arch/arm/mach-msm/devices-8064.c
+++ b/arch/arm/mach-msm/devices-8064.c
@@ -1337,19 +1337,19 @@
 	},
 #ifdef CONFIG_MMC_MSM_SPS_SUPPORT
 	{
-		.name   = "sdcc_dml_addr",
+		.name   = "dml_mem",
 		.start	= MSM_SDC1_DML_BASE,
 		.end	= MSM_SDC1_BAM_BASE - 1,
 		.flags	= IORESOURCE_MEM,
 	},
 	{
-		.name   = "sdcc_bam_addr",
+		.name   = "bam_mem",
 		.start	= MSM_SDC1_BAM_BASE,
 		.end	= MSM_SDC1_BAM_BASE + (2 * SZ_4K) - 1,
 		.flags	= IORESOURCE_MEM,
 	},
 	{
-		.name   = "sdcc_bam_irq",
+		.name   = "bam_irq",
 		.start	= SDC1_BAM_IRQ,
 		.end	= SDC1_BAM_IRQ,
 		.flags	= IORESOURCE_IRQ,
@@ -1372,19 +1372,19 @@
 	},
 #ifdef CONFIG_MMC_MSM_SPS_SUPPORT
 	{
-		.name   = "sdcc_dml_addr",
+		.name   = "dml_mem",
 		.start	= MSM_SDC2_DML_BASE,
 		.end	= MSM_SDC2_BAM_BASE - 1,
 		.flags	= IORESOURCE_MEM,
 	},
 	{
-		.name   = "sdcc_bam_addr",
+		.name   = "bam_mem",
 		.start	= MSM_SDC2_BAM_BASE,
 		.end	= MSM_SDC2_BAM_BASE + (2 * SZ_4K) - 1,
 		.flags	= IORESOURCE_MEM,
 	},
 	{
-		.name   = "sdcc_bam_irq",
+		.name   = "bam_irq",
 		.start	= SDC2_BAM_IRQ,
 		.end	= SDC2_BAM_IRQ,
 		.flags	= IORESOURCE_IRQ,
@@ -1407,19 +1407,19 @@
 	},
 #ifdef CONFIG_MMC_MSM_SPS_SUPPORT
 	{
-		.name   = "sdcc_dml_addr",
+		.name   = "dml_mem",
 		.start	= MSM_SDC3_DML_BASE,
 		.end	= MSM_SDC3_BAM_BASE - 1,
 		.flags	= IORESOURCE_MEM,
 	},
 	{
-		.name   = "sdcc_bam_addr",
+		.name   = "bam_mem",
 		.start	= MSM_SDC3_BAM_BASE,
 		.end	= MSM_SDC3_BAM_BASE + (2 * SZ_4K) - 1,
 		.flags	= IORESOURCE_MEM,
 	},
 	{
-		.name   = "sdcc_bam_irq",
+		.name   = "bam_irq",
 		.start	= SDC3_BAM_IRQ,
 		.end	= SDC3_BAM_IRQ,
 		.flags	= IORESOURCE_IRQ,
@@ -1442,19 +1442,19 @@
 	},
 #ifdef CONFIG_MMC_MSM_SPS_SUPPORT
 	{
-		.name   = "sdcc_dml_addr",
+		.name   = "dml_mem",
 		.start	= MSM_SDC4_DML_BASE,
 		.end	= MSM_SDC4_BAM_BASE - 1,
 		.flags	= IORESOURCE_MEM,
 	},
 	{
-		.name   = "sdcc_bam_addr",
+		.name   = "bam_mem",
 		.start	= MSM_SDC4_BAM_BASE,
 		.end	= MSM_SDC4_BAM_BASE + (2 * SZ_4K) - 1,
 		.flags	= IORESOURCE_MEM,
 	},
 	{
-		.name   = "sdcc_bam_irq",
+		.name   = "bam_irq",
 		.start	= SDC4_BAM_IRQ,
 		.end	= SDC4_BAM_IRQ,
 		.flags	= IORESOURCE_IRQ,
@@ -1837,6 +1837,8 @@
 		{ .name = "lut_clk" },
 		{ .name = "tv_src_clk" },
 		{ .name = "tv_clk" },
+		{ .name = "reset1_clk" },
+		{ .name = "reset2_clk" },
 		{ 0 }
 	},
 	.bus_port0 = MSM_BUS_MASTER_MDP_PORT0,
diff --git a/arch/arm/mach-msm/devices-8930.c b/arch/arm/mach-msm/devices-8930.c
index ffa3c38..fa24ba9 100644
--- a/arch/arm/mach-msm/devices-8930.c
+++ b/arch/arm/mach-msm/devices-8930.c
@@ -425,6 +425,7 @@
 		{ .name = "lut_clk" },
 		{ .name = "tv_src_clk" },
 		{ .name = "tv_clk" },
+		{ .name = "reset1_clk" },
 		{ 0 }
 	},
 	.bus_port0 = MSM_BUS_MASTER_MDP_PORT0,
@@ -667,6 +668,58 @@
 		.ib  = 10000000,
 	},
 };
+static struct msm_bus_vectors vidc_venc_1080p_turbo_vectors[] = {
+	{
+		.src = MSM_BUS_MASTER_HD_CODEC_PORT0,
+		.dst = MSM_BUS_SLAVE_EBI_CH0,
+		.ab  = 222298112,
+		.ib  = 3522000000U,
+	},
+	{
+		.src = MSM_BUS_MASTER_HD_CODEC_PORT1,
+		.dst = MSM_BUS_SLAVE_EBI_CH0,
+		.ab  = 330301440,
+		.ib  = 3522000000U,
+	},
+	{
+		.src = MSM_BUS_MASTER_AMPSS_M0,
+		.dst = MSM_BUS_SLAVE_EBI_CH0,
+		.ab  = 2500000,
+		.ib  = 700000000,
+	},
+	{
+		.src = MSM_BUS_MASTER_AMPSS_M0,
+		.dst = MSM_BUS_SLAVE_EBI_CH0,
+		.ab  = 2500000,
+		.ib  = 10000000,
+	},
+};
+static struct msm_bus_vectors vidc_vdec_1080p_turbo_vectors[] = {
+	{
+		.src = MSM_BUS_MASTER_HD_CODEC_PORT0,
+		.dst = MSM_BUS_SLAVE_EBI_CH0,
+		.ab  = 222298112,
+		.ib  = 3522000000U,
+	},
+	{
+		.src = MSM_BUS_MASTER_HD_CODEC_PORT1,
+		.dst = MSM_BUS_SLAVE_EBI_CH0,
+		.ab  = 330301440,
+		.ib  = 3522000000U,
+	},
+	{
+		.src = MSM_BUS_MASTER_AMPSS_M0,
+		.dst = MSM_BUS_SLAVE_EBI_CH0,
+		.ab  = 2500000,
+		.ib  = 700000000,
+	},
+	{
+		.src = MSM_BUS_MASTER_AMPSS_M0,
+		.dst = MSM_BUS_SLAVE_EBI_CH0,
+		.ab  = 2500000,
+		.ib  = 10000000,
+	},
+};
 
 static struct msm_bus_paths vidc_bus_client_config[] = {
 	{
@@ -697,6 +750,14 @@
 		ARRAY_SIZE(vidc_vdec_1080p_vectors),
 		vidc_vdec_1080p_vectors,
 	},
+	{
+		ARRAY_SIZE(vidc_venc_1080p_turbo_vectors),
+		vidc_venc_1080p_turbo_vectors,
+	},
+	{
+		ARRAY_SIZE(vidc_vdec_1080p_turbo_vectors),
+		vidc_vdec_1080p_turbo_vectors,
+	},
 };
 
 static struct msm_bus_scale_pdata vidc_bus_client_data = {
diff --git a/arch/arm/mach-msm/devices-8960.c b/arch/arm/mach-msm/devices-8960.c
index b1ebe33..724eed8 100644
--- a/arch/arm/mach-msm/devices-8960.c
+++ b/arch/arm/mach-msm/devices-8960.c
@@ -819,19 +819,19 @@
 	},
 #ifdef CONFIG_MMC_MSM_SPS_SUPPORT
 	{
-		.name   = "sdcc_dml_addr",
+		.name   = "dml_mem",
 		.start	= MSM_SDC1_DML_BASE,
 		.end	= MSM_SDC1_BAM_BASE - 1,
 		.flags	= IORESOURCE_MEM,
 	},
 	{
-		.name   = "sdcc_bam_addr",
+		.name   = "bam_mem",
 		.start	= MSM_SDC1_BAM_BASE,
 		.end	= MSM_SDC1_BAM_BASE + (2 * SZ_4K) - 1,
 		.flags	= IORESOURCE_MEM,
 	},
 	{
-		.name   = "sdcc_bam_irq",
+		.name   = "bam_irq",
 		.start	= SDC1_BAM_IRQ,
 		.end	= SDC1_BAM_IRQ,
 		.flags	= IORESOURCE_IRQ,
@@ -854,19 +854,19 @@
 	},
 #ifdef CONFIG_MMC_MSM_SPS_SUPPORT
 	{
-		.name   = "sdcc_dml_addr",
+		.name   = "dml_mem",
 		.start	= MSM_SDC2_DML_BASE,
 		.end	= MSM_SDC2_BAM_BASE - 1,
 		.flags	= IORESOURCE_MEM,
 	},
 	{
-		.name   = "sdcc_bam_addr",
+		.name   = "bam_mem",
 		.start	= MSM_SDC2_BAM_BASE,
 		.end	= MSM_SDC2_BAM_BASE + (2 * SZ_4K) - 1,
 		.flags	= IORESOURCE_MEM,
 	},
 	{
-		.name   = "sdcc_bam_irq",
+		.name   = "bam_irq",
 		.start	= SDC2_BAM_IRQ,
 		.end	= SDC2_BAM_IRQ,
 		.flags	= IORESOURCE_IRQ,
@@ -889,19 +889,19 @@
 	},
 #ifdef CONFIG_MMC_MSM_SPS_SUPPORT
 	{
-		.name   = "sdcc_dml_addr",
+		.name   = "dml_mem",
 		.start	= MSM_SDC3_DML_BASE,
 		.end	= MSM_SDC3_BAM_BASE - 1,
 		.flags	= IORESOURCE_MEM,
 	},
 	{
-		.name   = "sdcc_bam_addr",
+		.name   = "bam_mem",
 		.start	= MSM_SDC3_BAM_BASE,
 		.end	= MSM_SDC3_BAM_BASE + (2 * SZ_4K) - 1,
 		.flags	= IORESOURCE_MEM,
 	},
 	{
-		.name   = "sdcc_bam_irq",
+		.name   = "bam_irq",
 		.start	= SDC3_BAM_IRQ,
 		.end	= SDC3_BAM_IRQ,
 		.flags	= IORESOURCE_IRQ,
@@ -924,19 +924,19 @@
 	},
 #ifdef CONFIG_MMC_MSM_SPS_SUPPORT
 	{
-		.name   = "sdcc_dml_addr",
+		.name   = "dml_mem",
 		.start	= MSM_SDC4_DML_BASE,
 		.end	= MSM_SDC4_BAM_BASE - 1,
 		.flags	= IORESOURCE_MEM,
 	},
 	{
-		.name   = "sdcc_bam_addr",
+		.name   = "bam_mem",
 		.start	= MSM_SDC4_BAM_BASE,
 		.end	= MSM_SDC4_BAM_BASE + (2 * SZ_4K) - 1,
 		.flags	= IORESOURCE_MEM,
 	},
 	{
-		.name   = "sdcc_bam_irq",
+		.name   = "bam_irq",
 		.start	= SDC4_BAM_IRQ,
 		.end	= SDC4_BAM_IRQ,
 		.flags	= IORESOURCE_IRQ,
@@ -959,19 +959,19 @@
 	},
 #ifdef CONFIG_MMC_MSM_SPS_SUPPORT
 	{
-		.name   = "sdcc_dml_addr",
+		.name   = "dml_mem",
 		.start	= MSM_SDC5_DML_BASE,
 		.end	= MSM_SDC5_BAM_BASE - 1,
 		.flags	= IORESOURCE_MEM,
 	},
 	{
-		.name   = "sdcc_bam_addr",
+		.name   = "bam_mem",
 		.start	= MSM_SDC5_BAM_BASE,
 		.end	= MSM_SDC5_BAM_BASE + (2 * SZ_4K) - 1,
 		.flags	= IORESOURCE_MEM,
 	},
 	{
-		.name   = "sdcc_bam_irq",
+		.name   = "bam_irq",
 		.start	= SDC5_BAM_IRQ,
 		.end	= SDC5_BAM_IRQ,
 		.flags	= IORESOURCE_IRQ,
@@ -2151,6 +2151,8 @@
 		{ .name = "lut_clk" },
 		{ .name = "tv_src_clk" },
 		{ .name = "tv_clk" },
+		{ .name = "reset1_clk" },
+		{ .name = "reset2_clk" },
 		{ 0 }
 	},
 	.bus_port0 = MSM_BUS_MASTER_MDP_PORT0,
diff --git a/arch/arm/mach-msm/devices-9615.c b/arch/arm/mach-msm/devices-9615.c
index 9c2b26a..9f03878 100644
--- a/arch/arm/mach-msm/devices-9615.c
+++ b/arch/arm/mach-msm/devices-9615.c
@@ -845,19 +845,19 @@
 	},
 #ifdef CONFIG_MMC_MSM_SPS_SUPPORT
 	{
-		.name   = "sdcc_dml_addr",
+		.name   = "dml_mem",
 		.start  = MSM_SDC1_DML_BASE,
 		.end    = MSM_SDC1_BAM_BASE - 1,
 		.flags  = IORESOURCE_MEM,
 	},
 	{
-		.name   = "sdcc_bam_addr",
+		.name   = "bam_mem",
 		.start  = MSM_SDC1_BAM_BASE,
 		.end    = MSM_SDC1_BAM_BASE + (2 * SZ_4K) - 1,
 		.flags  = IORESOURCE_MEM,
 	},
 	{
-		.name   = "sdcc_bam_irq",
+		.name   = "bam_irq",
 		.start  = SDC1_BAM_IRQ,
 		.end    = SDC1_BAM_IRQ,
 		.flags  = IORESOURCE_IRQ,
@@ -880,19 +880,19 @@
 	},
 #ifdef CONFIG_MMC_MSM_SPS_SUPPORT
 	{
-		.name   = "sdcc_dml_addr",
+		.name   = "dml_mem",
 		.start  = MSM_SDC2_DML_BASE,
 		.end    = MSM_SDC2_BAM_BASE - 1,
 		.flags  = IORESOURCE_MEM,
 	},
 	{
-		.name   = "sdcc_bam_addr",
+		.name   = "bam_mem",
 		.start  = MSM_SDC2_BAM_BASE,
 		.end    = MSM_SDC2_BAM_BASE + (2 * SZ_4K) - 1,
 		.flags  = IORESOURCE_MEM,
 	},
 	{
-		.name   = "sdcc_bam_irq",
+		.name   = "bam_irq",
 		.start  = SDC2_BAM_IRQ,
 		.end    = SDC2_BAM_IRQ,
 		.flags  = IORESOURCE_IRQ,
diff --git a/arch/arm/mach-msm/devices-msm7x25.c b/arch/arm/mach-msm/devices-msm7x25.c
index 2be7d5e..99b2960 100644
--- a/arch/arm/mach-msm/devices-msm7x25.c
+++ b/arch/arm/mach-msm/devices-msm7x25.c
@@ -439,23 +439,25 @@
 #define MSM_SDC4_BASE         0xA0700000
 static struct resource resources_sdc1[] = {
 	{
+		.name	= "core_mem",
 		.start	= MSM_SDC1_BASE,
 		.end	= MSM_SDC1_BASE + SZ_4K - 1,
 		.flags	= IORESOURCE_MEM,
 	},
 	{
+		.name	= "core_irq",
 		.start	= INT_SDC1_0,
 		.end	= INT_SDC1_1,
 		.flags	= IORESOURCE_IRQ,
 	},
 	{
-		.name	= "sdcc_dma_chnl",
+		.name	= "dma_chnl",
 		.start	= DMOV_SDC1_CHAN,
 		.end	= DMOV_SDC1_CHAN,
 		.flags	= IORESOURCE_DMA,
 	},
 	{
-		.name	= "sdcc_dma_crci",
+		.name	= "dma_crci",
 		.start	= DMOV_SDC1_CRCI,
 		.end	= DMOV_SDC1_CRCI,
 		.flags	= IORESOURCE_DMA,
@@ -464,23 +466,25 @@
 
 static struct resource resources_sdc2[] = {
 	{
+		.name	= "core_mem",
 		.start	= MSM_SDC2_BASE,
 		.end	= MSM_SDC2_BASE + SZ_4K - 1,
 		.flags	= IORESOURCE_MEM,
 	},
 	{
+		.name   = "core_irq",
 		.start	= INT_SDC2_0,
 		.end	= INT_SDC2_1,
 		.flags	= IORESOURCE_IRQ,
 	},
 	{
-		.name	= "sdcc_dma_chnl",
+		.name	= "dma_chnl",
 		.start	= DMOV_SDC2_CHAN,
 		.end	= DMOV_SDC2_CHAN,
 		.flags	= IORESOURCE_DMA,
 	},
 	{
-		.name	= "sdcc_dma_crci",
+		.name	= "dma_crci",
 		.start	= DMOV_SDC2_CRCI,
 		.end	= DMOV_SDC2_CRCI,
 		.flags	= IORESOURCE_DMA,
@@ -489,23 +493,25 @@
 
 static struct resource resources_sdc3[] = {
 	{
+		.name	= "core_mem",
 		.start	= MSM_SDC3_BASE,
 		.end	= MSM_SDC3_BASE + SZ_4K - 1,
 		.flags	= IORESOURCE_MEM,
 	},
 	{
+		.name   = "core_irq",
 		.start	= INT_SDC3_0,
 		.end	= INT_SDC3_1,
 		.flags	= IORESOURCE_IRQ,
 	},
 	{
-		.name	= "sdcc_dma_chnl",
+		.name	= "dma_chnl",
 		.start	= DMOV_SDC3_CHAN,
 		.end	= DMOV_SDC3_CHAN,
 		.flags	= IORESOURCE_DMA,
 	},
 	{
-		.name	= "sdcc_dma_crci",
+		.name	= "dma_crci",
 		.start	= DMOV_SDC3_CRCI,
 		.end	= DMOV_SDC3_CRCI,
 		.flags	= IORESOURCE_DMA,
@@ -514,23 +520,25 @@
 
 static struct resource resources_sdc4[] = {
 	{
+		.name	= "core_mem",
 		.start	= MSM_SDC4_BASE,
 		.end	= MSM_SDC4_BASE + SZ_4K - 1,
 		.flags	= IORESOURCE_MEM,
 	},
 	{
+		.name   = "core_irq",
 		.start	= INT_SDC4_0,
 		.end	= INT_SDC4_1,
 		.flags	= IORESOURCE_IRQ,
 	},
 	{
-		.name	= "sdcc_dma_chnl",
+		.name	= "dma_chnl",
 		.start	= DMOV_SDC4_CHAN,
 		.end	= DMOV_SDC4_CHAN,
 		.flags	= IORESOURCE_DMA,
 	},
 	{
-		.name	= "sdcc_dma_crci",
+		.name	= "dma_crci",
 		.start	= DMOV_SDC4_CRCI,
 		.end	= DMOV_SDC4_CRCI,
 		.flags	= IORESOURCE_DMA,
diff --git a/arch/arm/mach-msm/devices-msm7x27.c b/arch/arm/mach-msm/devices-msm7x27.c
index 69d7430..82c5eed 100644
--- a/arch/arm/mach-msm/devices-msm7x27.c
+++ b/arch/arm/mach-msm/devices-msm7x27.c
@@ -448,23 +448,25 @@
 #define MSM_SDC4_BASE         0xA0700000
 static struct resource resources_sdc1[] = {
 	{
+		.name	= "core_mem",
 		.start	= MSM_SDC1_BASE,
 		.end	= MSM_SDC1_BASE + SZ_4K - 1,
 		.flags	= IORESOURCE_MEM,
 	},
 	{
+		.name	= "core_irq",
 		.start	= INT_SDC1_0,
 		.end	= INT_SDC1_1,
 		.flags	= IORESOURCE_IRQ,
 	},
 	{
-		.name	= "sdcc_dma_chnl",
+		.name	= "dma_chnl",
 		.start	= DMOV_SDC1_CHAN,
 		.end	= DMOV_SDC1_CHAN,
 		.flags	= IORESOURCE_DMA,
 	},
 	{
-		.name	= "sdcc_dma_crci",
+		.name	= "dma_crci",
 		.start	= DMOV_SDC1_CRCI,
 		.end	= DMOV_SDC1_CRCI,
 		.flags	= IORESOURCE_DMA,
@@ -473,23 +475,25 @@
 
 static struct resource resources_sdc2[] = {
 	{
+		.name	= "core_mem",
 		.start	= MSM_SDC2_BASE,
 		.end	= MSM_SDC2_BASE + SZ_4K - 1,
 		.flags	= IORESOURCE_MEM,
 	},
 	{
+		.name   = "core_irq",
 		.start	= INT_SDC2_0,
 		.end	= INT_SDC2_1,
 		.flags	= IORESOURCE_IRQ,
 	},
 	{
-		.name	= "sdcc_dma_chnl",
+		.name	= "dma_chnl",
 		.start	= DMOV_SDC2_CHAN,
 		.end	= DMOV_SDC2_CHAN,
 		.flags	= IORESOURCE_DMA,
 	},
 	{
-		.name	= "sdcc_dma_crci",
+		.name	= "dma_crci",
 		.start	= DMOV_SDC2_CRCI,
 		.end	= DMOV_SDC2_CRCI,
 		.flags	= IORESOURCE_DMA,
@@ -498,23 +502,25 @@
 
 static struct resource resources_sdc3[] = {
 	{
+		.name	= "core_mem",
 		.start	= MSM_SDC3_BASE,
 		.end	= MSM_SDC3_BASE + SZ_4K - 1,
 		.flags	= IORESOURCE_MEM,
 	},
 	{
+		.name   = "core_irq",
 		.start	= INT_SDC3_0,
 		.end	= INT_SDC3_1,
 		.flags	= IORESOURCE_IRQ,
 	},
 	{
-		.name	= "sdcc_dma_chnl",
+		.name	= "dma_chnl",
 		.start	= DMOV_SDC3_CHAN,
 		.end	= DMOV_SDC3_CHAN,
 		.flags	= IORESOURCE_DMA,
 	},
 	{
-		.name	= "sdcc_dma_crci",
+		.name	= "dma_crci",
 		.start	= DMOV_SDC3_CRCI,
 		.end	= DMOV_SDC3_CRCI,
 		.flags	= IORESOURCE_DMA,
@@ -523,23 +529,25 @@
 
 static struct resource resources_sdc4[] = {
 	{
+		.name	= "core_mem",
 		.start	= MSM_SDC4_BASE,
 		.end	= MSM_SDC4_BASE + SZ_4K - 1,
 		.flags	= IORESOURCE_MEM,
 	},
 	{
+		.name   = "core_irq",
 		.start	= INT_SDC4_0,
 		.end	= INT_SDC4_1,
 		.flags	= IORESOURCE_IRQ,
 	},
 	{
-		.name	= "sdcc_dma_chnl",
+		.name	= "dma_chnl",
 		.start	= DMOV_SDC4_CHAN,
 		.end	= DMOV_SDC4_CHAN,
 		.flags	= IORESOURCE_DMA,
 	},
 	{
-		.name	= "sdcc_dma_crci",
+		.name	= "dma_crci",
 		.start	= DMOV_SDC4_CRCI,
 		.end	= DMOV_SDC4_CRCI,
 		.flags	= IORESOURCE_DMA,
diff --git a/arch/arm/mach-msm/devices-msm7x27a.c b/arch/arm/mach-msm/devices-msm7x27a.c
index 96984fb..2642864 100644
--- a/arch/arm/mach-msm/devices-msm7x27a.c
+++ b/arch/arm/mach-msm/devices-msm7x27a.c
@@ -507,23 +507,25 @@
 #define MSM_SDC4_BASE         0xA0700000
 static struct resource resources_sdc1[] = {
 	{
+		.name	= "core_mem",
 		.start	= MSM_SDC1_BASE,
 		.end	= MSM_SDC1_BASE + SZ_4K - 1,
 		.flags	= IORESOURCE_MEM,
 	},
 	{
+		.name	= "core_irq",
 		.start	= INT_SDC1_0,
 		.end	= INT_SDC1_1,
 		.flags	= IORESOURCE_IRQ,
 	},
 	{
-		.name	= "sdcc_dma_chnl",
+		.name	= "dma_chnl",
 		.start	= DMOV_SDC1_CHAN,
 		.end	= DMOV_SDC1_CHAN,
 		.flags	= IORESOURCE_DMA,
 	},
 	{
-		.name	= "sdcc_dma_crci",
+		.name	= "dma_crci",
 		.start	= DMOV_SDC1_CRCI,
 		.end	= DMOV_SDC1_CRCI,
 		.flags	= IORESOURCE_DMA,
@@ -532,23 +534,25 @@
 
 static struct resource resources_sdc2[] = {
 	{
+		.name	= "core_mem",
 		.start	= MSM_SDC2_BASE,
 		.end	= MSM_SDC2_BASE + SZ_4K - 1,
 		.flags	= IORESOURCE_MEM,
 	},
 	{
+		.name   = "core_irq",
 		.start	= INT_SDC2_0,
 		.end	= INT_SDC2_1,
 		.flags	= IORESOURCE_IRQ,
 	},
 	{
-		.name	= "sdcc_dma_chnl",
+		.name	= "dma_chnl",
 		.start	= DMOV_SDC2_CHAN,
 		.end	= DMOV_SDC2_CHAN,
 		.flags	= IORESOURCE_DMA,
 	},
 	{
-		.name	= "sdcc_dma_crci",
+		.name	= "dma_crci",
 		.start	= DMOV_SDC2_CRCI,
 		.end	= DMOV_SDC2_CRCI,
 		.flags	= IORESOURCE_DMA,
@@ -557,23 +561,25 @@
 
 static struct resource resources_sdc3[] = {
 	{
+		.name   = "core_mem",
 		.start	= MSM_SDC3_BASE,
 		.end	= MSM_SDC3_BASE + SZ_4K - 1,
 		.flags	= IORESOURCE_MEM,
 	},
 	{
+		.name   = "core_irq",
 		.start	= INT_SDC3_0,
 		.end	= INT_SDC3_1,
 		.flags	= IORESOURCE_IRQ,
 	},
 	{
-		.name	= "sdcc_dma_chnl",
+		.name	= "dma_chnl",
 		.start	= DMOV_NAND_CHAN,
 		.end	= DMOV_NAND_CHAN,
 		.flags	= IORESOURCE_DMA,
 	},
 	{
-		.name	= "sdcc_dma_crci",
+		.name	= "dma_crci",
 		.start	= DMOV_SDC3_CRCI,
 		.end	= DMOV_SDC3_CRCI,
 		.flags	= IORESOURCE_DMA,
@@ -582,23 +588,25 @@
 
 static struct resource resources_sdc4[] = {
 	{
+		.name	= "core_mem",
 		.start	= MSM_SDC4_BASE,
 		.end	= MSM_SDC4_BASE + SZ_4K - 1,
 		.flags	= IORESOURCE_MEM,
 	},
 	{
+		.name   = "core_irq",
 		.start	= INT_SDC4_0,
 		.end	= INT_SDC4_1,
 		.flags	= IORESOURCE_IRQ,
 	},
 	{
-		.name	= "sdcc_dma_chnl",
+		.name	= "dma_chnl",
 		.start	= DMOV_SDC4_CHAN,
 		.end	= DMOV_SDC4_CHAN,
 		.flags	= IORESOURCE_DMA,
 	},
 	{
-		.name	= "sdcc_dma_crci",
+		.name	= "dma_crci",
 		.start	= DMOV_SDC4_CRCI,
 		.end	= DMOV_SDC4_CRCI,
 		.flags	= IORESOURCE_DMA,
@@ -1175,23 +1183,25 @@
 
 static struct resource msm8625_resources_sdc1[] = {
 	{
+		.name	= "core_mem",
 		.start	= MSM_SDC1_BASE,
 		.end	= MSM_SDC1_BASE + SZ_4K - 1,
 		.flags	= IORESOURCE_MEM,
 	},
 	{
+		.name	= "core_irq",
 		.start	= MSM8625_INT_SDC1_0,
 		.end	= MSM8625_INT_SDC1_1,
 		.flags	= IORESOURCE_IRQ,
 	},
 	{
-		.name	= "sdcc_dma_chnl",
+		.name	= "dma_chnl",
 		.start	= DMOV_SDC1_CHAN,
 		.end	= DMOV_SDC1_CHAN,
 		.flags	= IORESOURCE_DMA,
 	},
 	{
-		.name	= "sdcc_dma_crci",
+		.name	= "dma_crci",
 		.start	= DMOV_SDC1_CRCI,
 		.end	= DMOV_SDC1_CRCI,
 		.flags	= IORESOURCE_DMA,
@@ -1200,23 +1210,25 @@
 
 static struct resource msm8625_resources_sdc2[] = {
 	{
+		.name   = "core_mem",
 		.start	= MSM_SDC2_BASE,
 		.end	= MSM_SDC2_BASE + SZ_4K - 1,
 		.flags	= IORESOURCE_MEM,
 	},
 	{
+		.name	= "core_irq",
 		.start	= MSM8625_INT_SDC2_0,
 		.end	= MSM8625_INT_SDC2_1,
 		.flags	= IORESOURCE_IRQ,
 	},
 	{
-		.name	= "sdcc_dma_chnl",
+		.name	= "dma_chnl",
 		.start	= DMOV_SDC2_CHAN,
 		.end	= DMOV_SDC2_CHAN,
 		.flags	= IORESOURCE_DMA,
 	},
 	{
-		.name	= "sdcc_dma_crci",
+		.name	= "dma_crci",
 		.start	= DMOV_SDC2_CRCI,
 		.end	= DMOV_SDC2_CRCI,
 		.flags	= IORESOURCE_DMA,
@@ -1225,23 +1237,25 @@
 
 static struct resource msm8625_resources_sdc3[] = {
 	{
+		.name   = "core_mem",
 		.start	= MSM_SDC3_BASE,
 		.end	= MSM_SDC3_BASE + SZ_4K - 1,
 		.flags	= IORESOURCE_MEM,
 	},
 	{
+		.name	= "core_irq",
 		.start	= MSM8625_INT_SDC3_0,
 		.end	= MSM8625_INT_SDC3_1,
 		.flags	= IORESOURCE_IRQ,
 	},
 	{
-		.name	= "sdcc_dma_chnl",
+		.name	= "dma_chnl",
 		.start	= DMOV_SDC3_CHAN,
 		.end	= DMOV_SDC3_CHAN,
 		.flags	= IORESOURCE_DMA,
 	},
 	{
-		.name	= "sdcc_dma_crci",
+		.name	= "dma_crci",
 		.start	= DMOV_SDC3_CRCI,
 		.end	= DMOV_SDC3_CRCI,
 		.flags	= IORESOURCE_DMA,
@@ -1250,23 +1264,25 @@
 
 static struct resource msm8625_resources_sdc4[] = {
 	{
+		.name   = "core_mem",
 		.start	= MSM_SDC4_BASE,
 		.end	= MSM_SDC4_BASE + SZ_4K - 1,
 		.flags	= IORESOURCE_MEM,
 	},
 	{
+		.name	= "core_irq",
 		.start	= MSM8625_INT_SDC4_0,
 		.end	= MSM8625_INT_SDC4_1,
 		.flags	= IORESOURCE_IRQ,
 	},
 	{
-		.name	= "sdcc_dma_chnl",
+		.name	= "dma_chnl",
 		.start	= DMOV_SDC4_CHAN,
 		.end	= DMOV_SDC4_CHAN,
 		.flags	= IORESOURCE_DMA,
 	},
 	{
-		.name	= "sdcc_dma_crci",
+		.name	= "dma_crci",
 		.start	= DMOV_SDC4_CRCI,
 		.end	= DMOV_SDC4_CRCI,
 		.flags	= IORESOURCE_DMA,
@@ -1645,18 +1661,23 @@
 	/* Part number for 1GHz part */
 	case 0x770:
 	case 0x771:
+	case 0x77C:
 	case 0x780:
+	case 0x8D0:
 		cpu = MSM8625;
 		break;
 	/* Part number for 1.2GHz part */
 	case 0x773:
 	case 0x774:
 	case 0x781:
+	case 0x8D1:
 		cpu = MSM8625A;
 		break;
 	case 0x775:
 	case 0x776:
+	case 0x77D:
 	case 0x782:
+	case 0x8D2:
 		cpu = MSM8625AB;
 		break;
 	default:
diff --git a/arch/arm/mach-msm/devices-msm7x30.c b/arch/arm/mach-msm/devices-msm7x30.c
index f04ef9d..a6473c6 100644
--- a/arch/arm/mach-msm/devices-msm7x30.c
+++ b/arch/arm/mach-msm/devices-msm7x30.c
@@ -26,7 +26,7 @@
 #include <mach/dma.h>
 #include <mach/board.h>
 #include <asm/clkdev.h>
-
+#include <linux/ion.h>
 #include "devices.h"
 #include "footswitch.h"
 
@@ -784,23 +784,25 @@
 #define MSM_SDC4_BASE         0xA3100000
 static struct resource resources_sdc1[] = {
 	{
+		.name	= "core_mem",
 		.start	= MSM_SDC1_BASE,
 		.end	= MSM_SDC1_BASE + SZ_4K - 1,
 		.flags	= IORESOURCE_MEM,
 	},
 	{
+		.name	= "core_irq",
 		.start	= INT_SDC1_0,
 		.end	= INT_SDC1_1,
 		.flags	= IORESOURCE_IRQ,
 	},
 	{
-		.name	= "sdcc_dma_chnl",
+		.name	= "dma_chnl",
 		.start	= DMOV_SDC1_CHAN,
 		.end	= DMOV_SDC1_CHAN,
 		.flags	= IORESOURCE_DMA,
 	},
 	{
-		.name	= "sdcc_dma_crci",
+		.name	= "dma_crci",
 		.start	= DMOV_SDC1_CRCI,
 		.end	= DMOV_SDC1_CRCI,
 		.flags	= IORESOURCE_DMA,
@@ -809,23 +811,25 @@
 
 static struct resource resources_sdc2[] = {
 	{
+		.name   = "core_mem",
 		.start	= MSM_SDC2_BASE,
 		.end	= MSM_SDC2_BASE + SZ_4K - 1,
 		.flags	= IORESOURCE_MEM,
 	},
 	{
+		.name	= "core_irq",
 		.start	= INT_SDC2_0,
 		.end	= INT_SDC2_1,
 		.flags	= IORESOURCE_IRQ,
 	},
 	{
-		.name	= "sdcc_dma_chnl",
+		.name	= "dma_chnl",
 		.start	= DMOV_NAND_CHAN,
 		.end	= DMOV_NAND_CHAN,
 		.flags	= IORESOURCE_DMA,
 	},
 	{
-		.name	= "sdcc_dma_crci",
+		.name	= "dma_crci",
 		.start	= DMOV_SDC2_CRCI,
 		.end	= DMOV_SDC2_CRCI,
 		.flags	= IORESOURCE_DMA,
@@ -834,23 +838,25 @@
 
 static struct resource resources_sdc3[] = {
 	{
+		.name   = "core_mem",
 		.start	= MSM_SDC3_BASE,
 		.end	= MSM_SDC3_BASE + SZ_4K - 1,
 		.flags	= IORESOURCE_MEM,
 	},
 	{
+		.name	= "core_irq",
 		.start	= INT_SDC3_0,
 		.end	= INT_SDC3_1,
 		.flags	= IORESOURCE_IRQ,
 	},
 	{
-		.name	= "sdcc_dma_chnl",
+		.name	= "dma_chnl",
 		.start	= DMOV_SDC3_CHAN,
 		.end	= DMOV_SDC3_CHAN,
 		.flags	= IORESOURCE_DMA,
 	},
 	{
-		.name	= "sdcc_dma_crci",
+		.name	= "dma_crci",
 		.start	= DMOV_SDC3_CRCI,
 		.end	= DMOV_SDC3_CRCI,
 		.flags	= IORESOURCE_DMA,
@@ -859,23 +865,25 @@
 
 static struct resource resources_sdc4[] = {
 	{
+		.name   = "core_mem",
 		.start	= MSM_SDC4_BASE,
 		.end	= MSM_SDC4_BASE + SZ_4K - 1,
 		.flags	= IORESOURCE_MEM,
 	},
 	{
+		.name	= "core_irq",
 		.start	= INT_SDC4_0,
 		.end	= INT_SDC4_1,
 		.flags	= IORESOURCE_IRQ,
 	},
 	{
-		.name	= "sdcc_dma_chnl",
+		.name	= "dma_chnl",
 		.start	= DMOV_SDC4_CHAN,
 		.end	= DMOV_SDC4_CHAN,
 		.flags	= IORESOURCE_DMA,
 	},
 	{
-		.name	= "sdcc_dma_crci",
+		.name	= "dma_crci",
 		.start	= DMOV_SDC4_CRCI,
 		.end	= DMOV_SDC4_CRCI,
 		.flags	= IORESOURCE_DMA,
@@ -955,8 +963,8 @@
 };
 
 struct msm_vidc_platform_data vidc_platform_data = {
-	.memtype = MEMTYPE_EBI0,
-	.enable_ion = 0,
+	.memtype = ION_CAMERA_HEAP_ID,
+	.enable_ion = 1,
 	.disable_dmx = 0,
 	.cont_mode_dpb_count = 8
 };
diff --git a/arch/arm/mach-msm/devices-msm8x60.c b/arch/arm/mach-msm/devices-msm8x60.c
index 3920abe..9ea817f 100644
--- a/arch/arm/mach-msm/devices-msm8x60.c
+++ b/arch/arm/mach-msm/devices-msm8x60.c
@@ -1091,43 +1091,45 @@
 
 static struct resource resources_sdc1[] = {
 	{
+		.name	= "core_mem",
 		.start	= MSM_SDC1_BASE,
 		.end	= MSM_SDC1_DML_BASE - 1,
 		.flags	= IORESOURCE_MEM,
 	},
 	{
+		.name	= "core_irq",
 		.start	= SDC1_IRQ_0,
 		.end	= SDC1_IRQ_0,
 		.flags	= IORESOURCE_IRQ,
 	},
 #ifdef CONFIG_MMC_MSM_SPS_SUPPORT
 	{
-		.name   = "sdcc_dml_addr",
+		.name   = "dml_mem",
 		.start	= MSM_SDC1_DML_BASE,
 		.end	= MSM_SDC1_BAM_BASE - 1,
 		.flags	= IORESOURCE_MEM,
 	},
 	{
-		.name   = "sdcc_bam_addr",
+		.name   = "bam_mem",
 		.start	= MSM_SDC1_BAM_BASE,
 		.end	= MSM_SDC1_BAM_BASE + (2 * SZ_4K) - 1,
 		.flags	= IORESOURCE_MEM,
 	},
 	{
-		.name   = "sdcc_bam_irq",
+		.name   = "bam_irq",
 		.start	= SDC1_BAM_IRQ,
 		.end	= SDC1_BAM_IRQ,
 		.flags	= IORESOURCE_IRQ,
 	},
 #else
 	{
-		.name	= "sdcc_dma_chnl",
+		.name	= "dma_chnl",
 		.start	= DMOV_SDC1_CHAN,
 		.end	= DMOV_SDC1_CHAN,
 		.flags	= IORESOURCE_DMA,
 	},
 	{
-		.name	= "sdcc_dma_crci",
+		.name	= "dma_crci",
 		.start	= DMOV_SDC1_CRCI,
 		.end	= DMOV_SDC1_CRCI,
 		.flags	= IORESOURCE_DMA,
@@ -1137,43 +1139,45 @@
 
 static struct resource resources_sdc2[] = {
 	{
+		.name	= "core_mem",
 		.start	= MSM_SDC2_BASE,
 		.end	= MSM_SDC2_DML_BASE - 1,
 		.flags	= IORESOURCE_MEM,
 	},
 	{
+		.name	= "core_irq",
 		.start	= SDC2_IRQ_0,
 		.end	= SDC2_IRQ_0,
 		.flags	= IORESOURCE_IRQ,
 	},
 #ifdef CONFIG_MMC_MSM_SPS_SUPPORT
 	{
-		.name   = "sdcc_dml_addr",
+		.name   = "dml_mem",
 		.start	= MSM_SDC2_DML_BASE,
 		.end	= MSM_SDC2_BAM_BASE - 1,
 		.flags	= IORESOURCE_MEM,
 	},
 	{
-		.name   = "sdcc_bam_addr",
+		.name   = "bam_mem",
 		.start	= MSM_SDC2_BAM_BASE,
 		.end	= MSM_SDC2_BAM_BASE + (2 * SZ_4K) - 1,
 		.flags	= IORESOURCE_MEM,
 	},
 	{
-		.name   = "sdcc_bam_irq",
+		.name   = "bam_irq",
 		.start	= SDC2_BAM_IRQ,
 		.end	= SDC2_BAM_IRQ,
 		.flags	= IORESOURCE_IRQ,
 	},
 #else
 	{
-		.name	= "sdcc_dma_chnl",
+		.name	= "dma_chnl",
 		.start	= DMOV_SDC2_CHAN,
 		.end	= DMOV_SDC2_CHAN,
 		.flags	= IORESOURCE_DMA,
 	},
 	{
-		.name	= "sdcc_dma_crci",
+		.name	= "dma_crci",
 		.start	= DMOV_SDC2_CRCI,
 		.end	= DMOV_SDC2_CRCI,
 		.flags	= IORESOURCE_DMA,
@@ -1183,43 +1187,45 @@
 
 static struct resource resources_sdc3[] = {
 	{
+		.name	= "core_mem",
 		.start	= MSM_SDC3_BASE,
 		.end	= MSM_SDC3_DML_BASE - 1,
 		.flags	= IORESOURCE_MEM,
 	},
 	{
+		.name	= "core_irq",
 		.start	= SDC3_IRQ_0,
 		.end	= SDC3_IRQ_0,
 		.flags	= IORESOURCE_IRQ,
 	},
 #ifdef CONFIG_MMC_MSM_SPS_SUPPORT
 	{
-		.name   = "sdcc_dml_addr",
+		.name   = "dml_mem",
 		.start	= MSM_SDC3_DML_BASE,
 		.end	= MSM_SDC3_BAM_BASE - 1,
 		.flags	= IORESOURCE_MEM,
 	},
 	{
-		.name   = "sdcc_bam_addr",
+		.name   = "bam_mem",
 		.start	= MSM_SDC3_BAM_BASE,
 		.end	= MSM_SDC3_BAM_BASE + (2 * SZ_4K) - 1,
 		.flags	= IORESOURCE_MEM,
 	},
 	{
-		.name   = "sdcc_bam_irq",
+		.name   = "bam_irq",
 		.start	= SDC3_BAM_IRQ,
 		.end	= SDC3_BAM_IRQ,
 		.flags	= IORESOURCE_IRQ,
 	},
 #else
 	{
-		.name	= "sdcc_dma_chnl",
+		.name	= "dma_chnl",
 		.start	= DMOV_SDC3_CHAN,
 		.end	= DMOV_SDC3_CHAN,
 		.flags	= IORESOURCE_DMA,
 	},
 	{
-		.name	= "sdcc_dma_crci",
+		.name	= "dma_crci",
 		.start	= DMOV_SDC3_CRCI,
 		.end	= DMOV_SDC3_CRCI,
 		.flags	= IORESOURCE_DMA,
@@ -1229,43 +1235,45 @@
 
 static struct resource resources_sdc4[] = {
 	{
+		.name	= "core_mem",
 		.start	= MSM_SDC4_BASE,
 		.end	= MSM_SDC4_DML_BASE - 1,
 		.flags	= IORESOURCE_MEM,
 	},
 	{
+		.name	= "core_irq",
 		.start	= SDC4_IRQ_0,
 		.end	= SDC4_IRQ_0,
 		.flags	= IORESOURCE_IRQ,
 	},
 #ifdef CONFIG_MMC_MSM_SPS_SUPPORT
 	{
-		.name   = "sdcc_dml_addr",
+		.name   = "dml_mem",
 		.start	= MSM_SDC4_DML_BASE,
 		.end	= MSM_SDC4_BAM_BASE - 1,
 		.flags	= IORESOURCE_MEM,
 	},
 	{
-		.name   = "sdcc_bam_addr",
+		.name   = "bam_mem",
 		.start	= MSM_SDC4_BAM_BASE,
 		.end	= MSM_SDC4_BAM_BASE + (2 * SZ_4K) - 1,
 		.flags	= IORESOURCE_MEM,
 	},
 	{
-		.name   = "sdcc_bam_irq",
+		.name   = "bam_irq",
 		.start	= SDC4_BAM_IRQ,
 		.end	= SDC4_BAM_IRQ,
 		.flags	= IORESOURCE_IRQ,
 	},
 #else
 	{
-		.name	= "sdcc_dma_chnl",
+		.name	= "dma_chnl",
 		.start	= DMOV_SDC4_CHAN,
 		.end	= DMOV_SDC4_CHAN,
 		.flags	= IORESOURCE_DMA,
 	},
 	{
-		.name	= "sdcc_dma_crci",
+		.name	= "dma_crci",
 		.start	= DMOV_SDC4_CRCI,
 		.end	= DMOV_SDC4_CRCI,
 		.flags	= IORESOURCE_DMA,
@@ -1275,43 +1283,45 @@
 
 static struct resource resources_sdc5[] = {
 	{
+		.name	= "core_mem",
 		.start	= MSM_SDC5_BASE,
 		.end	= MSM_SDC5_DML_BASE - 1,
 		.flags	= IORESOURCE_MEM,
 	},
 	{
+		.name	= "core_irq",
 		.start	= SDC5_IRQ_0,
 		.end	= SDC5_IRQ_0,
 		.flags	= IORESOURCE_IRQ,
 	},
 #ifdef CONFIG_MMC_MSM_SPS_SUPPORT
 	{
-		.name   = "sdcc_dml_addr",
+		.name   = "dml_mem",
 		.start	= MSM_SDC5_DML_BASE,
 		.end	= MSM_SDC5_BAM_BASE - 1,
 		.flags	= IORESOURCE_MEM,
 	},
 	{
-		.name   = "sdcc_bam_addr",
+		.name   = "bam_mem",
 		.start	= MSM_SDC5_BAM_BASE,
 		.end	= MSM_SDC5_BAM_BASE + (2 * SZ_4K) - 1,
 		.flags	= IORESOURCE_MEM,
 	},
 	{
-		.name   = "sdcc_bam_irq",
+		.name   = "bam_irq",
 		.start	= SDC5_BAM_IRQ,
 		.end	= SDC5_BAM_IRQ,
 		.flags	= IORESOURCE_IRQ,
 	},
 #else
 	{
-		.name	= "sdcc_dma_chnl",
+		.name	= "dma_chnl",
 		.start	= DMOV_SDC5_CHAN,
 		.end	= DMOV_SDC5_CHAN,
 		.flags	= IORESOURCE_DMA,
 	},
 	{
-		.name	= "sdcc_dma_crci",
+		.name	= "dma_crci",
 		.start	= DMOV_SDC5_CRCI,
 		.end	= DMOV_SDC5_CRCI,
 		.flags	= IORESOURCE_DMA,
diff --git a/arch/arm/mach-msm/devices-qsd8x50.c b/arch/arm/mach-msm/devices-qsd8x50.c
index ec4a14f..03ffa2f 100644
--- a/arch/arm/mach-msm/devices-qsd8x50.c
+++ b/arch/arm/mach-msm/devices-qsd8x50.c
@@ -490,23 +490,25 @@
 #define MSM_SDC4_BASE         0xA0600000
 static struct resource resources_sdc1[] = {
 	{
+		.name	= "core_mem",
 		.start	= MSM_SDC1_BASE,
 		.end	= MSM_SDC1_BASE + SZ_4K - 1,
 		.flags	= IORESOURCE_MEM,
 	},
 	{
+		.name   = "core_irq",
 		.start	= INT_SDC1_0,
 		.end	= INT_SDC1_1,
 		.flags	= IORESOURCE_IRQ,
 	},
 	{
-		.name	= "sdcc_dma_chnl",
+		.name	= "dma_chnl",
 		.start	= DMOV_SDC1_CHAN,
 		.end	= DMOV_SDC1_CHAN,
 		.flags	= IORESOURCE_DMA,
 	},
 	{
-		.name	= "sdcc_dma_crci",
+		.name	= "dma_crci",
 		.start	= DMOV_SDC1_CRCI,
 		.end	= DMOV_SDC1_CRCI,
 		.flags	= IORESOURCE_DMA,
@@ -515,23 +517,25 @@
 
 static struct resource resources_sdc2[] = {
 	{
+		.name   = "core_mem",
 		.start	= MSM_SDC2_BASE,
 		.end	= MSM_SDC2_BASE + SZ_4K - 1,
 		.flags	= IORESOURCE_MEM,
 	},
 	{
+		.name   = "core_irq",
 		.start	= INT_SDC2_0,
 		.end	= INT_SDC2_1,
 		.flags	= IORESOURCE_IRQ,
 	},
 	{
-		.name	= "sdcc_dma_chnl",
+		.name	= "dma_chnl",
 		.start	= DMOV_SDC2_CHAN,
 		.end	= DMOV_SDC2_CHAN,
 		.flags	= IORESOURCE_DMA,
 	},
 	{
-		.name	= "sdcc_dma_crci",
+		.name	= "dma_crci",
 		.start	= DMOV_SDC2_CRCI,
 		.end	= DMOV_SDC2_CRCI,
 		.flags	= IORESOURCE_DMA,
@@ -540,23 +544,25 @@
 
 static struct resource resources_sdc3[] = {
 	{
+		.name   = "core_mem",
 		.start	= MSM_SDC3_BASE,
 		.end	= MSM_SDC3_BASE + SZ_4K - 1,
 		.flags	= IORESOURCE_MEM,
 	},
 	{
+		.name   = "core_irq",
 		.start	= INT_SDC3_0,
 		.end	= INT_SDC3_1,
 		.flags	= IORESOURCE_IRQ,
 	},
 	{
-		.name	= "sdcc_dma_chnl",
+		.name	= "dma_chnl",
 		.start	= DMOV_SDC3_CHAN,
 		.end	= DMOV_SDC3_CHAN,
 		.flags	= IORESOURCE_DMA,
 	},
 	{
-		.name	= "sdcc_dma_crci",
+		.name	= "dma_crci",
 		.start	= DMOV_SDC3_CRCI,
 		.end	= DMOV_SDC3_CRCI,
 		.flags	= IORESOURCE_DMA,
@@ -565,23 +571,25 @@
 
 static struct resource resources_sdc4[] = {
 	{
+		.name   = "core_mem",
 		.start	= MSM_SDC4_BASE,
 		.end	= MSM_SDC4_BASE + SZ_4K - 1,
 		.flags	= IORESOURCE_MEM,
 	},
 	{
+		.name   = "core_irq",
 		.start	= INT_SDC4_0,
 		.end	= INT_SDC4_1,
 		.flags	= IORESOURCE_IRQ,
 	},
 	{
-		.name	= "sdcc_dma_chnl",
+		.name	= "dma_chnl",
 		.start	= DMOV_SDC4_CHAN,
 		.end	= DMOV_SDC4_CHAN,
 		.flags	= IORESOURCE_DMA,
 	},
 	{
-		.name	= "sdcc_dma_crci",
+		.name	= "dma_crci",
 		.start	= DMOV_SDC4_CRCI,
 		.end	= DMOV_SDC4_CRCI,
 		.flags	= IORESOURCE_DMA,
diff --git a/arch/arm/mach-msm/include/mach/cpuidle.h b/arch/arm/mach-msm/include/mach/cpuidle.h
index 2a5aa97..8566e7f 100644
--- a/arch/arm/mach-msm/include/mach/cpuidle.h
+++ b/arch/arm/mach-msm/include/mach/cpuidle.h
@@ -25,12 +25,16 @@
 	enum msm_pm_sleep_mode mode_nr;
 };
 
-#ifdef CONFIG_CPU_IDLE
+#ifdef CONFIG_PM
 s32 msm_cpuidle_get_deep_idle_latency(void);
+#else
+static inline s32 msm_cpuidle_get_deep_idle_latency(void) { return 0; }
+#endif
+
+#ifdef CONFIG_CPU_IDLE
 int msm_cpuidle_init(void);
 #else
 static inline int msm_cpuidle_init(void) { return -ENOSYS; }
-static inline s32 msm_cpuidle_get_deep_idle_latency(void) { return 0; }
 #endif
 
 #ifdef CONFIG_MSM_SLEEP_STATS
diff --git a/arch/arm/mach-msm/include/mach/mdm2.h b/arch/arm/mach-msm/include/mach/mdm2.h
index c4877cc..6ec12c1 100644
--- a/arch/arm/mach-msm/include/mach/mdm2.h
+++ b/arch/arm/mach-msm/include/mach/mdm2.h
@@ -32,6 +32,7 @@
 	struct platform_device *peripheral_platform_device;
 	const unsigned int ramdump_timeout_ms;
 	int image_upgrade_supported;
+	struct gpiomux_setting *mdm2ap_status_gpio_run_cfg;
 };
 
 #endif
diff --git a/arch/arm/mach-msm/include/mach/memory.h b/arch/arm/mach-msm/include/mach/memory.h
index bf92f7d..6b7ad9a 100644
--- a/arch/arm/mach-msm/include/mach/memory.h
+++ b/arch/arm/mach-msm/include/mach/memory.h
@@ -78,6 +78,7 @@
 int platform_physical_remove_pages(u64, u64);
 int platform_physical_active_pages(u64, u64);
 int platform_physical_low_power_pages(u64, u64);
+int msm_get_memory_type_from_name(const char *memtype_name);
 
 extern int (*change_memory_power)(u64, u64, int);
 
diff --git a/arch/arm/mach-msm/include/mach/msm_hsusb.h b/arch/arm/mach-msm/include/mach/msm_hsusb.h
index 4f140cc..4e22b0f 100644
--- a/arch/arm/mach-msm/include/mach/msm_hsusb.h
+++ b/arch/arm/mach-msm/include/mach/msm_hsusb.h
@@ -202,8 +202,4 @@
 	struct clk *ebi1_clk;
 };
 
-int msm_ep_config(struct usb_ep *ep);
-int msm_ep_unconfig(struct usb_ep *ep);
-int msm_data_fifo_config(struct usb_ep *ep, u32 addr, u32 size);
-
 #endif
diff --git a/arch/arm/mach-msm/include/mach/msm_pcie.h b/arch/arm/mach-msm/include/mach/msm_pcie.h
index 8bc4317..74f0f5b 100644
--- a/arch/arm/mach-msm/include/mach/msm_pcie.h
+++ b/arch/arm/mach-msm/include/mach/msm_pcie.h
@@ -22,7 +22,7 @@
 	MSM_PCIE_MAX_GPIO
 };
 
-/* gpio info structrue */
+/* gpio info structure */
 struct msm_pcie_gpio_info_t {
 	char      *name;
 	uint32_t   num;
@@ -32,8 +32,10 @@
 /* msm pcie platfrom data */
 struct msm_pcie_platform {
 	struct msm_pcie_gpio_info_t  *gpio;
+
 	uint32_t                      axi_addr;
 	uint32_t                      axi_size;
+	uint32_t                      wake_n;
 };
 
 #endif
diff --git a/arch/arm/mach-msm/include/mach/ocmem.h b/arch/arm/mach-msm/include/mach/ocmem.h
index 415f8ed..904de5e 100644
--- a/arch/arm/mach-msm/include/mach/ocmem.h
+++ b/arch/arm/mach-msm/include/mach/ocmem.h
@@ -47,6 +47,24 @@
 	struct ocmem_chunk chunks[OCMEM_MAX_CHUNKS];
 };
 
+enum ocmem_power_state {
+	OCMEM_OFF = 0x0,
+	OCMEM_RETENTION,
+	OCMEM_ON,
+	OCMEM_MAX = OCMEM_ON,
+};
+
+struct ocmem_resource {
+	unsigned resource_id;
+	unsigned num_keys;
+	unsigned int *keys;
+};
+
+struct ocmem_vectors {
+	unsigned num_resources;
+	struct ocmem_resource *r;
+};
+
 /* List of clients that allocate/interact with OCMEM */
 /* Must be in sync with client_names */
 enum ocmem_client {
@@ -120,4 +138,14 @@
 int ocmem_evict(int client_id);
 
 int ocmem_restore(int client_id);
+
+/* Power Control APIs */
+int ocmem_set_power_state(int client_id, struct ocmem_buf *buf,
+				enum ocmem_power_state new_state);
+
+enum ocmem_power_state ocmem_get_power_state(int client_id,
+				struct ocmem_buf *buf);
+
+struct ocmem_vectors *ocmem_get_vectors(int client_id,
+						struct ocmem_buf *buf);
 #endif
diff --git a/arch/arm/mach-msm/include/mach/ocmem_priv.h b/arch/arm/mach-msm/include/mach/ocmem_priv.h
index 70b5a45..49e283d 100644
--- a/arch/arm/mach-msm/include/mach/ocmem_priv.h
+++ b/arch/arm/mach-msm/include/mach/ocmem_priv.h
@@ -16,11 +16,11 @@
 /** All interfaces in this header should only be used by OCMEM driver
  *  Client drivers should use wrappers available in ocmem.h
  **/
-
-#include "ocmem.h"
-#include <mach/msm_iomap.h>
-#include <asm/io.h>
 #include <linux/platform_device.h>
+#include <asm/io.h>
+#include <mach/msm_iomap.h>
+#include "ocmem.h"
+
 
 #define OCMEM_PHYS_BASE 0xFEC00000
 #define OCMEM_PHYS_SIZE 0x180000
@@ -62,6 +62,13 @@
 	SCHED_DUMP,
 };
 
+/* Operational modes of each region */
+enum region_mode {
+	WIDE_MODE = 0x0,
+	THIN_MODE,
+	MODE_DEFAULT = WIDE_MODE,
+};
+
 struct ocmem_plat_data {
 	void __iomem *vbase;
 	unsigned long size;
@@ -77,6 +84,8 @@
 	int ocmem_irq;
 	int dm_irq;
 	bool interleaved;
+	bool rpm_pwr_ctrl;
+	unsigned rpm_rsc_type;
 };
 
 struct ocmem_eviction_data {
@@ -113,6 +122,8 @@
 	unsigned long req_start;
 	unsigned long req_end;
 	unsigned long req_sz;
+	/* Request Power State */
+	unsigned power_state;
 	struct ocmem_eviction_data *edata;
 };
 
@@ -154,6 +165,18 @@
 		return NULL;
 }
 
+/* Simple wrappers which will have debug features added later */
+static inline int ocmem_read(void *at)
+{
+	return readl_relaxed(at);
+}
+
+static inline int ocmem_write(unsigned long val, void *at)
+{
+	writel_relaxed(val, at);
+	return 0;
+}
+
 struct ocmem_zone *get_zone(unsigned);
 unsigned long offset_to_phys(unsigned long);
 unsigned long phys_to_offset(unsigned long);
@@ -170,6 +193,7 @@
 
 int ocmem_sched_init(void);
 int ocmem_rdm_init(struct platform_device *);
+int ocmem_core_init(struct platform_device *);
 int process_allocate(int, struct ocmem_handle *, unsigned long, unsigned long,
 			unsigned long, bool, bool);
 int process_free(int, struct ocmem_handle *);
@@ -180,4 +204,6 @@
 int ocmem_rdm_transfer(int, struct ocmem_map_list *,
 				unsigned long, int);
 unsigned long process_quota(int);
+int ocmem_memory_off(int, unsigned long, unsigned long);
+int ocmem_memory_on(int, unsigned long, unsigned long);
 #endif
diff --git a/arch/arm/mach-msm/include/mach/usb_bam.h b/arch/arm/mach-msm/include/mach/usb_bam.h
index ec135a3..47313a7 100644
--- a/arch/arm/mach-msm/include/mach/usb_bam.h
+++ b/arch/arm/mach-msm/include/mach/usb_bam.h
@@ -12,6 +12,7 @@
 
 #ifndef _USB_BAM_H_
 #define _USB_BAM_H_
+#include "sps.h"
 
 /**
  * SPS Pipes direction.
@@ -43,7 +44,7 @@
  * @return 0 on success, negative value on error
  *
  */
-int usb_bam_connect(u8 idx, u8 *src_pipe_idx, u8 *dst_pipe_idx);
+int usb_bam_connect(u8 idx, u32 *src_pipe_idx, u32 *dst_pipe_idx);
 
 /**
  * Register a wakeup callback from peer BAM.
@@ -57,8 +58,40 @@
  */
 int usb_bam_register_wake_cb(u8 idx,
 	 int (*callback)(void *), void* param);
+
+/**
+ * Disconnect USB-to-Peripheral SPS connection.
+ *
+ * @idx - Connection index.
+ *
+ * @return 0 on success, negative value on error
+ */
+int usb_bam_disconnect_pipe(u8 idx);
+
+/**
+ * Returns usb bam connection parameters.
+ *
+ * @conn_idx - Connection index.
+ *
+ * @usb_bam_pipe_dir - Usb pipe direction to/from peripheral.
+ *
+ * @usb_bam_handle - Usb bam handle.
+ *
+ * @usb_bam_pipe_idx - Usb bam pipe index.
+ *
+ * @peer_pipe_idx - Peer pipe index.
+ *
+ * @desc_fifo - Descriptor fifo parameters.
+ *
+ * @data_fifo - Data fifo parameters.
+ *
+ */
+void get_bam2bam_connection_info(u8 conn_idx, enum usb_bam_pipe_dir pipe_dir,
+	u32 *usb_bam_handle, u32 *usb_bam_pipe_idx, u32 *peer_pipe_idx,
+	struct sps_mem_buffer *desc_fifo, struct sps_mem_buffer *data_fifo);
+
 #else
-static inline int usb_bam_connect(u8 idx, u8 *src_pipe_idx, u8 *dst_pipe_idx)
+static inline int usb_bam_connect(u8 idx, u32 *src_pipe_idx, u32 *dst_pipe_idx)
 {
 	return -ENODEV;
 }
@@ -68,6 +101,18 @@
 {
 	return -ENODEV;
 }
+
+static inline int usb_bam_disconnect_pipe(u8 idx)
+{
+	return -ENODEV;
+}
+
+static inline void get_bam2bam_connection_info(u8 conn_idx,
+	enum usb_bam_pipe_dir pipe_dir, u32 *usb_bam_handle,
+	u32 *usb_bam_pipe_idx, u32 *peer_pipe_idx,
+	struct sps_mem_buffer *desc_fifo, struct sps_mem_buffer *data_fifo)
+{
+	return;
+}
 #endif
 #endif				/* _USB_BAM_H_ */
-
diff --git a/arch/arm/mach-msm/ipc_router.c b/arch/arm/mach-msm/ipc_router.c
index c0bff63..7dc8d0f 100644
--- a/arch/arm/mach-msm/ipc_router.c
+++ b/arch/arm/mach-msm/ipc_router.c
@@ -130,7 +130,6 @@
 	uint32_t remote_node_id;
 	uint32_t initialized;
 	struct list_head pkt_list;
-	wait_queue_head_t read_wait;
 	struct wake_lock wakelock;
 	struct mutex rx_lock;
 	struct mutex tx_lock;
@@ -262,16 +261,15 @@
 		return NULL;
 
 	mutex_lock(&xprt_info->rx_lock);
-	while (!(xprt_info->abort_data_read) &&
-		list_empty(&xprt_info->pkt_list)) {
-		mutex_unlock(&xprt_info->rx_lock);
-		wait_event(xprt_info->read_wait,
-			   ((xprt_info->abort_data_read) ||
-			   !list_empty(&xprt_info->pkt_list)));
-		mutex_lock(&xprt_info->rx_lock);
-	}
 	if (xprt_info->abort_data_read) {
 		mutex_unlock(&xprt_info->rx_lock);
+		pr_err("%s detected SSR & exiting now\n",
+			xprt_info->xprt->name);
+		return NULL;
+	}
+
+	if (list_empty(&xprt_info->pkt_list)) {
+		mutex_unlock(&xprt_info->rx_lock);
 		return NULL;
 	}
 
@@ -1364,144 +1362,135 @@
 			     struct msm_ipc_router_xprt_info,
 			     read_data);
 
-	pkt = rr_read(xprt_info);
-	if (!pkt) {
-		pr_err("%s: rr_read failed\n", __func__);
-		goto fail_io;
-	}
+	while ((pkt = rr_read(xprt_info)) != NULL) {
+		if (pkt->length < IPC_ROUTER_HDR_SIZE ||
+		    pkt->length > MAX_IPC_PKT_SIZE) {
+			pr_err("%s: Invalid pkt length %d\n",
+				__func__, pkt->length);
+			goto fail_data;
+		}
 
-	if (pkt->length < IPC_ROUTER_HDR_SIZE ||
-	    pkt->length > MAX_IPC_PKT_SIZE) {
-		pr_err("%s: Invalid pkt length %d\n", __func__, pkt->length);
-		goto fail_data;
-	}
+		head_skb = skb_peek(pkt->pkt_fragment_q);
+		if (!head_skb) {
+			pr_err("%s: head_skb is invalid\n", __func__);
+			goto fail_data;
+		}
 
-	head_skb = skb_peek(pkt->pkt_fragment_q);
-	if (!head_skb) {
-		pr_err("%s: head_skb is invalid\n", __func__);
-		goto fail_data;
-	}
+		hdr = (struct rr_header *)(head_skb->data);
+		RR("- ver=%d type=%d src=%d:%08x crx=%d siz=%d dst=%d:%08x\n",
+		   hdr->version, hdr->type, hdr->src_node_id, hdr->src_port_id,
+		   hdr->confirm_rx, hdr->size, hdr->dst_node_id,
+		   hdr->dst_port_id);
 
-	hdr = (struct rr_header *)(head_skb->data);
-	RR("- ver=%d type=%d src=%d:%08x crx=%d siz=%d dst=%d:%08x\n",
-	   hdr->version, hdr->type, hdr->src_node_id, hdr->src_port_id,
-	   hdr->confirm_rx, hdr->size, hdr->dst_node_id, hdr->dst_port_id);
-	RAW_HDR("[r rr_h] "
-		"ver=%i,type=%s,src_node_id=%08x,src_port_id=%08x,"
-		"confirm_rx=%i,size=%3i,dst_node_id=%08x,dst_port_id=%08x\n",
-		hdr->version, type_to_str(hdr->type), hdr->src_node_id,
-		hdr->src_port_id, hdr->confirm_rx, hdr->size, hdr->dst_node_id,
-		hdr->dst_port_id);
+		if (hdr->version != IPC_ROUTER_VERSION) {
+			pr_err("version %d != %d\n",
+				hdr->version, IPC_ROUTER_VERSION);
+			goto fail_data;
+		}
 
-	if (hdr->version != IPC_ROUTER_VERSION) {
-		pr_err("version %d != %d\n", hdr->version, IPC_ROUTER_VERSION);
-		goto fail_data;
-	}
+		if ((hdr->dst_node_id != IPC_ROUTER_NID_LOCAL) &&
+		    ((hdr->type == IPC_ROUTER_CTRL_CMD_RESUME_TX) ||
+		     (hdr->type == IPC_ROUTER_CTRL_CMD_DATA))) {
+			forward_msg(xprt_info, pkt);
+			release_pkt(pkt);
+			continue;
+		}
 
-	if ((hdr->dst_node_id != IPC_ROUTER_NID_LOCAL) &&
-	    ((hdr->type == IPC_ROUTER_CTRL_CMD_RESUME_TX) ||
-	     (hdr->type == IPC_ROUTER_CTRL_CMD_DATA))) {
-		forward_msg(xprt_info, pkt);
-		release_pkt(pkt);
-		goto done;
-	}
-
-	if ((hdr->dst_port_id == IPC_ROUTER_ADDRESS) ||
-	    (hdr->type == IPC_ROUTER_CTRL_CMD_HELLO)) {
-		process_control_msg(xprt_info, pkt);
-		release_pkt(pkt);
-		goto done;
-	}
+		if ((hdr->dst_port_id == IPC_ROUTER_ADDRESS) ||
+		    (hdr->type == IPC_ROUTER_CTRL_CMD_HELLO)) {
+			process_control_msg(xprt_info, pkt);
+			release_pkt(pkt);
+			continue;
+		}
 #if defined(CONFIG_MSM_SMD_LOGGING)
 #if defined(DEBUG)
-	if (msm_ipc_router_debug_mask & SMEM_LOG) {
-		smem_log_event((SMEM_LOG_PROC_ID_APPS |
-			SMEM_LOG_RPC_ROUTER_EVENT_BASE |
-			IPC_ROUTER_LOG_EVENT_RX),
-			(hdr->src_node_id << 24) |
-			(hdr->src_port_id & 0xffffff),
-			(hdr->dst_node_id << 24) |
-			(hdr->dst_port_id & 0xffffff),
-			(hdr->type << 24) | (hdr->confirm_rx << 16) |
-			(hdr->size & 0xffff));
-	}
+		if (msm_ipc_router_debug_mask & SMEM_LOG) {
+			smem_log_event((SMEM_LOG_PROC_ID_APPS |
+				SMEM_LOG_RPC_ROUTER_EVENT_BASE |
+				IPC_ROUTER_LOG_EVENT_RX),
+				(hdr->src_node_id << 24) |
+				(hdr->src_port_id & 0xffffff),
+				(hdr->dst_node_id << 24) |
+				(hdr->dst_port_id & 0xffffff),
+				(hdr->type << 24) | (hdr->confirm_rx << 16) |
+				(hdr->size & 0xffff));
+		}
 #endif
 #endif
 
-	resume_tx = hdr->confirm_rx;
-	resume_tx_node_id = hdr->dst_node_id;
-	resume_tx_port_id = hdr->dst_port_id;
+		resume_tx = hdr->confirm_rx;
+		resume_tx_node_id = hdr->dst_node_id;
+		resume_tx_port_id = hdr->dst_port_id;
 
-	rport_ptr = msm_ipc_router_lookup_remote_port(hdr->src_node_id,
+		rport_ptr = msm_ipc_router_lookup_remote_port(hdr->src_node_id,
 						      hdr->src_port_id);
 
-	mutex_lock(&local_ports_lock);
-	port_ptr = msm_ipc_router_lookup_local_port(hdr->dst_port_id);
-	if (!port_ptr) {
-		pr_err("%s: No local port id %08x\n", __func__,
-			hdr->dst_port_id);
-		mutex_unlock(&local_ports_lock);
-		release_pkt(pkt);
-		goto process_done;
-	}
-
-	if (!rport_ptr) {
-		rport_ptr = msm_ipc_router_create_remote_port(
-							hdr->src_node_id,
-							hdr->src_port_id);
-		if (!rport_ptr) {
-			pr_err("%s: Remote port %08x:%08x creation failed\n",
-				__func__, hdr->src_node_id, hdr->src_port_id);
+		mutex_lock(&local_ports_lock);
+		port_ptr = msm_ipc_router_lookup_local_port(hdr->dst_port_id);
+		if (!port_ptr) {
+			pr_err("%s: No local port id %08x\n", __func__,
+				hdr->dst_port_id);
 			mutex_unlock(&local_ports_lock);
+			release_pkt(pkt);
 			goto process_done;
 		}
-	}
 
-	if (!port_ptr->notify) {
-		mutex_lock(&port_ptr->port_rx_q_lock);
-		wake_lock(&port_ptr->port_rx_wake_lock);
-		list_add_tail(&pkt->list, &port_ptr->port_rx_q);
-		wake_up(&port_ptr->port_rx_wait_q);
-		mutex_unlock(&port_ptr->port_rx_q_lock);
-		mutex_unlock(&local_ports_lock);
-	} else {
-		mutex_lock(&port_ptr->port_rx_q_lock);
-		src_addr = kmalloc(sizeof(struct msm_ipc_port_addr),
-				   GFP_KERNEL);
-		if (src_addr) {
-			src_addr->node_id = hdr->src_node_id;
-			src_addr->port_id = hdr->src_port_id;
+		if (!rport_ptr) {
+			rport_ptr = msm_ipc_router_create_remote_port(
+							hdr->src_node_id,
+							hdr->src_port_id);
+			if (!rport_ptr) {
+				pr_err("%s: Rmt Prt %08x:%08x create failed\n",
+					__func__, hdr->src_node_id,
+					hdr->src_port_id);
+				mutex_unlock(&local_ports_lock);
+				goto process_done;
+			}
 		}
-		skb_pull(head_skb, IPC_ROUTER_HDR_SIZE);
-		mutex_unlock(&local_ports_lock);
-		port_ptr->notify(MSM_IPC_ROUTER_READ_CB, pkt->pkt_fragment_q,
-				 src_addr, port_ptr->priv);
-		mutex_unlock(&port_ptr->port_rx_q_lock);
-		pkt->pkt_fragment_q = NULL;
-		src_addr = NULL;
-		release_pkt(pkt);
-	}
+
+		if (!port_ptr->notify) {
+			mutex_lock(&port_ptr->port_rx_q_lock);
+			wake_lock(&port_ptr->port_rx_wake_lock);
+			list_add_tail(&pkt->list, &port_ptr->port_rx_q);
+			wake_up(&port_ptr->port_rx_wait_q);
+			mutex_unlock(&port_ptr->port_rx_q_lock);
+			mutex_unlock(&local_ports_lock);
+		} else {
+			mutex_lock(&port_ptr->port_rx_q_lock);
+			src_addr = kmalloc(sizeof(struct msm_ipc_port_addr),
+					   GFP_KERNEL);
+			if (src_addr) {
+				src_addr->node_id = hdr->src_node_id;
+				src_addr->port_id = hdr->src_port_id;
+			}
+			skb_pull(head_skb, IPC_ROUTER_HDR_SIZE);
+			mutex_unlock(&local_ports_lock);
+			port_ptr->notify(MSM_IPC_ROUTER_READ_CB,
+				pkt->pkt_fragment_q, src_addr, port_ptr->priv);
+			mutex_unlock(&port_ptr->port_rx_q_lock);
+			pkt->pkt_fragment_q = NULL;
+			src_addr = NULL;
+			release_pkt(pkt);
+		}
 
 process_done:
-	if (resume_tx) {
-		union rr_control_msg msg;
+		if (resume_tx) {
+			union rr_control_msg msg;
 
-		msg.cmd = IPC_ROUTER_CTRL_CMD_RESUME_TX;
-		msg.cli.node_id = resume_tx_node_id;
-		msg.cli.port_id = resume_tx_port_id;
+			msg.cmd = IPC_ROUTER_CTRL_CMD_RESUME_TX;
+			msg.cli.node_id = resume_tx_node_id;
+			msg.cli.port_id = resume_tx_port_id;
 
-		RR("x RESUME_TX id=%d:%08x\n",
-		   msg.cli.node_id, msg.cli.port_id);
-		msm_ipc_router_send_control_msg(xprt_info, &msg);
+			RR("x RESUME_TX id=%d:%08x\n",
+			   msg.cli.node_id, msg.cli.port_id);
+			msm_ipc_router_send_control_msg(xprt_info, &msg);
+		}
+
 	}
-
-done:
-	queue_work(xprt_info->workqueue, &xprt_info->read_data);
 	return;
 
 fail_data:
 	release_pkt(pkt);
-fail_io:
 	pr_err("ipc_router has died\n");
 }
 
@@ -2334,7 +2323,6 @@
 	xprt_info->initialized = 0;
 	xprt_info->remote_node_id = -1;
 	INIT_LIST_HEAD(&xprt_info->pkt_list);
-	init_waitqueue_head(&xprt_info->read_wait);
 	mutex_init(&xprt_info->rx_lock);
 	mutex_init(&xprt_info->tx_lock);
 	wake_lock_init(&xprt_info->wakelock,
@@ -2368,8 +2356,6 @@
 	}
 	mutex_unlock(&routing_table_lock);
 
-	queue_work(xprt_info->workqueue, &xprt_info->read_data);
-
 	xprt->priv = xprt_info;
 
 	return 0;
@@ -2382,8 +2368,9 @@
 	if (xprt && xprt->priv) {
 		xprt_info = xprt->priv;
 
+		mutex_lock(&xprt_info->rx_lock);
 		xprt_info->abort_data_read = 1;
-		wake_up(&xprt_info->read_wait);
+		mutex_unlock(&xprt_info->rx_lock);
 
 		mutex_lock(&xprt_info_list_lock);
 		list_del(&xprt_info->list);
@@ -2481,8 +2468,8 @@
 	mutex_lock(&xprt_info->rx_lock);
 	list_add_tail(&pkt->list, &xprt_info->pkt_list);
 	wake_lock(&xprt_info->wakelock);
-	wake_up(&xprt_info->read_wait);
 	mutex_unlock(&xprt_info->rx_lock);
+	queue_work(xprt_info->workqueue, &xprt_info->read_data);
 }
 
 static int modem_restart_notifier_cb(struct notifier_block *this,
diff --git a/arch/arm/mach-msm/mdm2.c b/arch/arm/mach-msm/mdm2.c
index e74af2e..07f3efc 100644
--- a/arch/arm/mach-msm/mdm2.c
+++ b/arch/arm/mach-msm/mdm2.c
@@ -44,7 +44,7 @@
 
 #define MDM_PBLRDY_CNT		20
 
-static int mdm_debug_on;
+static int mdm_debug_mask;
 static int power_on_count;
 static int hsic_peripheral_status;
 static DEFINE_MUTEX(hsic_status_lock);
@@ -234,7 +234,7 @@
 
 static void debug_state_changed(int value)
 {
-	mdm_debug_on = value;
+	mdm_debug_mask = value;
 }
 
 static void mdm_status_changed(struct mdm_modem_drv *mdm_drv, int value)
diff --git a/arch/arm/mach-msm/mdm_common.c b/arch/arm/mach-msm/mdm_common.c
index 6b40cda..6ca9045 100644
--- a/arch/arm/mach-msm/mdm_common.c
+++ b/arch/arm/mach-msm/mdm_common.c
@@ -30,6 +30,7 @@
 #include <linux/workqueue.h>
 #include <linux/clk.h>
 #include <linux/mfd/pmic8058.h>
+#include <linux/msm_charm.h>
 #include <asm/mach-types.h>
 #include <asm/uaccess.h>
 #include <mach/mdm2.h>
@@ -37,7 +38,7 @@
 #include <mach/subsystem_notif.h>
 #include <mach/subsystem_restart.h>
 #include <mach/rpm.h>
-#include <linux/msm_charm.h>
+#include <mach/gpiomux.h>
 #include "msm_watchdog.h"
 #include "mdm_private.h"
 #include "sysmon.h"
@@ -48,10 +49,11 @@
 #define MDM_RDUMP_TIMEOUT	120000L
 #define MDM2AP_STATUS_TIMEOUT_MS 60000L
 
-static int mdm_debug_on;
+static unsigned int mdm_debug_mask;
 static struct workqueue_struct *mdm_queue;
 static struct workqueue_struct *mdm_sfr_queue;
 static unsigned int dump_timeout_ms;
+static int vddmin_gpios_sent;
 
 #define EXTERNAL_MODEM "external_modem"
 
@@ -68,6 +70,13 @@
 #define SFR_MAX_RETRIES		10
 #define SFR_RETRY_INTERVAL	1000
 
+enum gpio_update_config {
+	GPIO_UPDATE_BOOTING_CONFIG = 1,
+	GPIO_UPDATE_RUNNING_CONFIG,
+};
+static int mdm2ap_status_valid_old_config;
+static struct gpiomux_setting mdm2ap_status_old_config;
+
 static irqreturn_t mdm_vddmin_change(int irq, void *dev_id)
 {
 	int value = gpio_get_value(
@@ -92,6 +101,7 @@
 	if (!vddmin_res)
 		return;
 
+	pr_info("Enabling vddmin logging\n");
 	req.id = vddmin_res->rpm_id;
 	req.value = ((uint32_t)vddmin_res->ap2mdm_vddmin_gpio & 0x0000FFFF)
 							<< 16;
@@ -100,7 +110,7 @@
 
 	msm_rpm_set(MSM_RPM_CTX_SET_0, &req, 1);
 
-	/* Monitor low power gpio from mdm */
+	/* Start monitoring low power gpio from mdm */
 	irq = MSM_GPIO_TO_INT(vddmin_res->mdm2ap_vddmin_gpio);
 	if (irq < 0) {
 		pr_err("%s: could not get LPM POWER IRQ resource.\n",
@@ -163,6 +173,37 @@
 
 static DECLARE_DELAYED_WORK(mdm2ap_status_check_work, mdm2ap_status_check);
 
+static void mdm_update_gpio_configs(enum gpio_update_config gpio_config)
+{
+	/* Some gpio configuration may need updating after modem bootup. */
+	switch (gpio_config) {
+	case GPIO_UPDATE_RUNNING_CONFIG:
+		if (mdm_drv->pdata->mdm2ap_status_gpio_run_cfg) {
+			if (msm_gpiomux_write(mdm_drv->mdm2ap_status_gpio,
+				GPIOMUX_ACTIVE,
+				mdm_drv->pdata->mdm2ap_status_gpio_run_cfg,
+				&mdm2ap_status_old_config))
+				pr_err("%s: failed updating running gpio config\n",
+					   __func__);
+			else
+				mdm2ap_status_valid_old_config = 1;
+		}
+		break;
+	case GPIO_UPDATE_BOOTING_CONFIG:
+		if (mdm2ap_status_valid_old_config) {
+			msm_gpiomux_write(mdm_drv->mdm2ap_status_gpio,
+					GPIOMUX_ACTIVE,
+					&mdm2ap_status_old_config,
+					NULL);
+			mdm2ap_status_valid_old_config = 0;
+		}
+		break;
+	default:
+		pr_err("%s: called with no config\n", __func__);
+		break;
+	}
+}
+
 long mdm_modem_ioctl(struct file *filp, unsigned int cmd,
 				unsigned long arg)
 {
@@ -205,11 +246,10 @@
 		else
 			first_boot = 0;
 
-		/* Start a timer to check that the mdm2ap_status gpio
-		 * goes high.
+		/* If successful, start a timer to check that the mdm2ap_status
+		 * gpio goes high.
 		 */
-
-		if (gpio_get_value(mdm_drv->mdm2ap_status_gpio) == 0)
+		if (!status && gpio_get_value(mdm_drv->mdm2ap_status_gpio) == 0)
 			schedule_delayed_work(&mdm2ap_status_check_work,
 				msecs_to_jiffies(MDM2AP_STATUS_TIMEOUT_MS));
 		break;
@@ -266,6 +306,9 @@
 	pr_debug("%s: status:%d\n", __func__, value);
 	if (mdm_drv->mdm_ready && mdm_drv->ops->status_cb)
 		mdm_drv->ops->status_cb(mdm_drv, value);
+
+	/* Update gpio configuration to "running" config. */
+	mdm_update_gpio_configs(GPIO_UPDATE_RUNNING_CONFIG);
 }
 
 static DECLARE_WORK(mdm_status_work, mdm_status_fn);
@@ -364,6 +407,7 @@
 static int mdm_subsys_shutdown(const struct subsys_desc *crashed_subsys)
 {
 	mdm_drv->mdm_ready = 0;
+	cancel_delayed_work(&mdm2ap_status_check_work);
 	gpio_direction_output(mdm_drv->ap2mdm_errfatal_gpio, 1);
 	if (mdm_drv->pdata->ramdump_delay_ms > 0) {
 		/* Wait for the external modem to complete
@@ -371,10 +415,13 @@
 		 */
 		msleep(mdm_drv->pdata->ramdump_delay_ms);
 	}
-	if (!mdm_drv->mdm_unexpected_reset_occurred)
+	if (!mdm_drv->mdm_unexpected_reset_occurred) {
 		mdm_drv->ops->reset_mdm_cb(mdm_drv);
-	else
+		/* Update gpio configuration to "booting" config. */
+		mdm_update_gpio_configs(GPIO_UPDATE_BOOTING_CONFIG);
+	} else {
 		mdm_drv->mdm_unexpected_reset_occurred = 0;
+	}
 	return 0;
 }
 
@@ -404,6 +451,7 @@
 				const struct subsys_desc *crashed_subsys)
 {
 	mdm_drv->mdm_ram_dump_status = 0;
+	cancel_delayed_work(&mdm2ap_status_check_work);
 	if (want_dumps) {
 		mdm_drv->boot_type = CHARM_RAM_DUMPS;
 		complete(&mdm_needs_reload);
@@ -416,8 +464,11 @@
 			pr_info("%s: mdm modem ramdumps completed.\n",
 					__func__);
 		INIT_COMPLETION(mdm_ram_dumps);
-		if (!mdm_drv->pdata->no_powerdown_after_ramdumps)
+		if (!mdm_drv->pdata->no_powerdown_after_ramdumps) {
 			mdm_drv->ops->power_down_mdm_cb(mdm_drv);
+			/* Update gpio configuration to "booting" config. */
+			mdm_update_gpio_configs(GPIO_UPDATE_BOOTING_CONFIG);
+		}
 	}
 	return mdm_drv->mdm_ram_dump_status;
 }
@@ -429,23 +480,33 @@
 	.name = EXTERNAL_MODEM,
 };
 
-static int mdm_debug_on_set(void *data, u64 val)
+/*
+ * Once the gpios are sent to the RPM and debugging starts, there is no
+ * way to stop it without rebooting the device.
+ */
+static int mdm_debug_mask_set(void *data, u64 val)
 {
-	mdm_debug_on = val;
+	if (!vddmin_gpios_sent &&
+		(val & MDM_DEBUG_MASK_VDDMIN_SETUP)) {
+		mdm_setup_vddmin_gpios();
+		vddmin_gpios_sent = 1;
+	}
+
+	mdm_debug_mask = val;
 	if (mdm_drv->ops->debug_state_changed_cb)
-		mdm_drv->ops->debug_state_changed_cb(mdm_debug_on);
+		mdm_drv->ops->debug_state_changed_cb(mdm_debug_mask);
 	return 0;
 }
 
-static int mdm_debug_on_get(void *data, u64 *val)
+static int mdm_debug_mask_get(void *data, u64 *val)
 {
-	*val = mdm_debug_on;
+	*val = mdm_debug_mask;
 	return 0;
 }
 
-DEFINE_SIMPLE_ATTRIBUTE(mdm_debug_on_fops,
-			mdm_debug_on_get,
-			mdm_debug_on_set, "%llu\n");
+DEFINE_SIMPLE_ATTRIBUTE(mdm_debug_mask_fops,
+			mdm_debug_mask_get,
+			mdm_debug_mask_set, "%llu\n");
 
 static int mdm_debugfs_init(void)
 {
@@ -455,8 +516,8 @@
 	if (IS_ERR(dent))
 		return PTR_ERR(dent);
 
-	debugfs_create_file("debug_on", 0644, dent, NULL,
-			&mdm_debug_on_fops);
+	debugfs_create_file("debug_mask", 0644, dent, NULL,
+			&mdm_debug_mask_fops);
 	return 0;
 }
 
@@ -552,7 +613,7 @@
 
 	mdm_modem_initialize_data(pdev, p_mdm_cb);
 	if (mdm_drv->ops->debug_state_changed_cb)
-		mdm_drv->ops->debug_state_changed_cb(mdm_debug_on);
+		mdm_drv->ops->debug_state_changed_cb(mdm_debug_mask);
 
 	gpio_request(mdm_drv->ap2mdm_status_gpio, "AP2MDM_STATUS");
 	gpio_request(mdm_drv->ap2mdm_errfatal_gpio, "AP2MDM_ERRFATAL");
@@ -688,8 +749,6 @@
 	 */
 	if (mdm_drv->ap2mdm_pmic_pwr_en_gpio > 0)
 		gpio_direction_output(mdm_drv->ap2mdm_pmic_pwr_en_gpio, 1);
-	/* Register VDDmin gpios with RPM */
-	mdm_setup_vddmin_gpios();
 
 	/* Perform early powerup of the external modem in order to
 	 * allow tabla devices to be found.
diff --git a/arch/arm/mach-msm/mdm_private.h b/arch/arm/mach-msm/mdm_private.h
index 7aba83d..d1e85d3 100644
--- a/arch/arm/mach-msm/mdm_private.h
+++ b/arch/arm/mach-msm/mdm_private.h
@@ -13,6 +13,7 @@
 #ifndef _ARCH_ARM_MACH_MSM_MDM_PRIVATE_H
 #define _ARCH_ARM_MACH_MSM_MDM_PRIVATE_H
 
+#define MDM_DEBUG_MASK_VDDMIN_SETUP (0x00000002)
 struct mdm_modem_drv;
 
 struct mdm_ops {
diff --git a/arch/arm/mach-msm/memory.c b/arch/arm/mach-msm/memory.c
index 63c2d3a..4a2fd7c 100644
--- a/arch/arm/mach-msm/memory.c
+++ b/arch/arm/mach-msm/memory.c
@@ -390,24 +390,33 @@
 	[MEMTYPE_EBI1] = "EBI1",
 };
 
-static int reserve_memory_type(char *mem_name,
-				struct memtype_reserve *reserve_table,
-				int size)
+int msm_get_memory_type_from_name(const char *memtype_name)
 {
 	int i;
 
 	for (i = 0; i < ARRAY_SIZE(memtype_names); i++) {
-		if (memtype_names[i] && strcmp(mem_name,
-				memtype_names[i]) == 0) {
-			reserve_table[i].size += size;
-			return 0;
-		}
+		if (memtype_names[i] &&
+		    strcmp(memtype_name, memtype_names[i]) == 0)
+			return i;
 	}
 
-	pr_err("Could not find memory type %s\n", mem_name);
+	pr_err("Could not find memory type %s\n", memtype_name);
 	return -EINVAL;
 }
 
+static int reserve_memory_type(const char *mem_name,
+				struct memtype_reserve *reserve_table,
+				int size)
+{
+	int ret = msm_get_memory_type_from_name(mem_name);
+
+	if (ret >= 0) {
+		reserve_table[ret].size += size;
+		ret = 0;
+	}
+	return ret;
+}
+
 static int check_for_compat(unsigned long node)
 {
 	char **start = __compat_exports_start;
diff --git a/arch/arm/mach-msm/msm_watchdog_v2.c b/arch/arm/mach-msm/msm_watchdog_v2.c
index a5f8bcc..a65cd21 100644
--- a/arch/arm/mach-msm/msm_watchdog_v2.c
+++ b/arch/arm/mach-msm/msm_watchdog_v2.c
@@ -22,6 +22,8 @@
 #include <linux/of.h>
 #include <linux/cpu.h>
 #include <linux/platform_device.h>
+#include <mach/scm.h>
+#include <mach/msm_memory_dump.h>
 
 #define MODULE_NAME "msm_watchdog"
 #define WDT0_ACCSCSSNBARK_INT 0
@@ -32,7 +34,8 @@
 #define WDT0_BARK_TIME	0x10
 #define WDT0_BITE_TIME	0x14
 
-#define MASK_SIZE	32
+#define MASK_SIZE		32
+#define SCM_SET_REGSAVE_CMD	0x2
 
 struct msm_watchdog_data {
 	unsigned int __iomem phys_base;
@@ -47,6 +50,7 @@
 	unsigned long long last_pet;
 	unsigned min_slack_ticks;
 	unsigned long long min_slack_ns;
+	void *scm_regsave;
 	cpumask_t alive_mask;
 	struct work_struct init_dogwork_struct;
 	struct delayed_work dogwork_struct;
@@ -242,6 +246,44 @@
 	return IRQ_HANDLED;
 }
 
+static void configure_bark_dump(struct msm_watchdog_data *wdog_dd)
+{
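+	/*
+	 * Hand the secure world a page for saving CPU registers on a
+	 * watchdog bark and register the same page in the MSM memory
+	 * dump table for cpu-hang dumps.
+	 */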
+	int ret;
+	struct msm_client_dump dump_entry;
+	struct {
+		unsigned addr;
+		int len;
+	} cmd_buf;
+
+	wdog_dd->scm_regsave = (void *)__get_free_page(GFP_KERNEL);
+	if (wdog_dd->scm_regsave) {
+		cmd_buf.addr = virt_to_phys(wdog_dd->scm_regsave);
+		cmd_buf.len  = PAGE_SIZE;
+		ret = scm_call(SCM_SVC_UTIL, SCM_SET_REGSAVE_CMD,
+					&cmd_buf, sizeof(cmd_buf), NULL, 0);
+		if (ret)
+			pr_err("Setting register save address failed.\n"
+				       "Registers won't be dumped on a dog "
+				       "bite\n");
+		dump_entry.id = MSM_CPU_CTXT;
+		dump_entry.start_addr = virt_to_phys(wdog_dd->scm_regsave);
+		dump_entry.end_addr = dump_entry.start_addr + PAGE_SIZE;
+		ret = msm_dump_table_register(&dump_entry);
+		if (ret)
+			pr_err("Setting cpu dump region failed\n"
+				"Registers won't be dumped during cpu hang\n");
+	} else {
+		pr_err("Allocating register save space failed\n"
+			       "Registers won't be dumped on a dog bite\n");
+		/*
+		 * No need to bail if allocation fails. Simply don't
+		 * send the command, and the secure side will reset
+		 * without saving registers.
+		 */
+	}
+}
+
 static void init_watchdog_work(struct work_struct *work)
 {
 	struct msm_watchdog_data *wdog_dd = container_of(work,
@@ -252,6 +294,7 @@
 	delay_time = msecs_to_jiffies(wdog_dd->pet_time);
 	wdog_dd->min_slack_ticks = UINT_MAX;
 	wdog_dd->min_slack_ns = ULLONG_MAX;
+	configure_bark_dump(wdog_dd);
 	timeout = (wdog_dd->bark_time * WDT_HZ)/1000;
 	__raw_writel(timeout, wdog_dd->base + WDT0_BARK_TIME);
 	__raw_writel(timeout + 3*WDT_HZ, wdog_dd->base + WDT0_BITE_TIME);
diff --git a/arch/arm/mach-msm/ocmem.c b/arch/arm/mach-msm/ocmem.c
index 753f6fb..a9c3f4c 100644
--- a/arch/arm/mach-msm/ocmem.c
+++ b/arch/arm/mach-msm/ocmem.c
@@ -24,14 +24,8 @@
 #include <linux/seq_file.h>
 #include <mach/ocmem_priv.h>
 
-/* This code is to temporarily work around the default state of OCMEM
-   regions in Virtio. These registers will be read from DT in a subsequent
-   patch which initializes the regions to appropriate default state.
-*/
-
 #define OCMEM_REGION_CTL_BASE 0xFDD0003C
 #define OCMEM_REGION_CTL_SIZE 0xFD0
-#define REGION_ENABLE 0x00003333
 #define GRAPHICS_REGION_CTL (0x17F000)
 
 struct ocmem_partition {
@@ -269,6 +263,30 @@
 	return i;
 }
 
+#if defined(CONFIG_MSM_OCMEM_LOCAL_POWER_CTRL)
+static int parse_power_ctrl_config(struct ocmem_plat_data *pdata,
+					struct device_node *node)
+{
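+	/*
+	 * Local power control: region states are committed with direct
+	 * register writes, so no RPM resource is required.
+	 */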
+	pdata->rpm_pwr_ctrl = false;
+	pdata->rpm_rsc_type = ~0x0;
+	return 0;
+}
+#else
+static int parse_power_ctrl_config(struct ocmem_plat_data *pdata,
+					struct device_node *node)
+{
+	unsigned rsc_type = ~0x0;
+	pdata->rpm_pwr_ctrl = false;
+	if (of_property_read_u32(node, "qcom,resource-type",
+					&rsc_type))
+		return -EINVAL;
+	pdata->rpm_pwr_ctrl = true;
+	pdata->rpm_rsc_type = rsc_type;
+	return 0;
+
+}
+#endif /* CONFIG_MSM_OCMEM_LOCAL_POWER_CTRL */
+
 static struct ocmem_plat_data *parse_dt_config(struct platform_device *pdev)
 {
 	struct device   *dev = &pdev->dev;
@@ -393,6 +411,11 @@
 	} else
 		dev_dbg(dev, "Found %d ocmem partitions\n", nr_parts);
 
+	if (parse_power_ctrl_config(pdata, node)) {
+		dev_err(dev, "No OCMEM RPM Resource specified\n");
+		return NULL;
+	}
+
 	pdata->nr_parts = nr_parts;
 	pdata->parts = parts;
 	pdata->nr_regions = nr_regions;
@@ -516,6 +539,9 @@
 
 	platform_set_drvdata(pdev, ocmem_pdata);
 
+	if (ocmem_core_init(pdev))
+		return -EBUSY;
+
 	if (ocmem_zone_init(pdev))
 		return -EBUSY;
 
@@ -529,10 +555,7 @@
 							OCMEM_REGION_CTL_SIZE);
 	if (!ocmem_region_vbase)
 		return -EBUSY;
-	/* Enable all the 3 regions until we have support for power features */
-	writel_relaxed(REGION_ENABLE, ocmem_region_vbase);
-	writel_relaxed(REGION_ENABLE, ocmem_region_vbase + 4);
-	writel_relaxed(REGION_ENABLE, ocmem_region_vbase + 8);
+
 	/* Enable the ocmem graphics mpU as a workaround in Virtio */
 	/* This will be programmed by TZ after TZ support is integrated */
 	writel_relaxed(GRAPHICS_REGION_CTL, ocmem_region_vbase + 0xFCC);
diff --git a/arch/arm/mach-msm/ocmem_api.c b/arch/arm/mach-msm/ocmem_api.c
index bb32fca..a5aed5e 100644
--- a/arch/arm/mach-msm/ocmem_api.c
+++ b/arch/arm/mach-msm/ocmem_api.c
@@ -399,3 +399,22 @@
 	mutex_unlock(&ocmem_eviction_lock);
 	return ret;
 }
+
+/* Wrappers until power control is transitioned to clients */
+enum ocmem_power_state ocmem_get_power_state(int client_id,
+						struct ocmem_buf *buffer)
+{
+	return 0;
+}
+
+int ocmem_set_power_state(int client_id, struct ocmem_buf *buffer,
+					enum ocmem_power_state new_state)
+{
+	return 0;
+}
+
+struct ocmem_vectors *ocmem_get_vectors(int client_id,
+				struct ocmem_buf *buffer)
+{
+	return NULL;
+}
diff --git a/arch/arm/mach-msm/ocmem_core.c b/arch/arm/mach-msm/ocmem_core.c
new file mode 100644
index 0000000..019f59f
--- /dev/null
+++ b/arch/arm/mach-msm/ocmem_core.c
@@ -0,0 +1,708 @@
+/* Copyright (c) 2012, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/debugfs.h>
+#include <linux/mutex.h>
+#include <linux/kernel.h>
+#include <linux/platform_device.h>
+
+#include <mach/ocmem_priv.h>
+#include <mach/rpm-smd.h>
+
+static unsigned num_regions;
+static unsigned num_macros;
+static unsigned num_ports;
+static unsigned num_banks;
+
+static unsigned long macro_size;
+static unsigned long region_size;
+
+static bool rpm_power_control;
+
+struct ocmem_hw_macro {
+	atomic_t m_on[OCMEM_CLIENT_MAX];
+	atomic_t m_retain[OCMEM_CLIENT_MAX];
+	unsigned m_state;
+};
+
+struct ocmem_hw_region {
+	unsigned psgsc_ctrl;
+	bool interleaved;
+	unsigned int mode;
+	unsigned int num_macros;
+	struct ocmem_hw_macro *macro;
+	struct msm_rpm_request *rpm_req;
+	unsigned r_state;
+};
+
+static struct ocmem_hw_region *region_ctrl;
+static struct mutex region_ctrl_lock;
+static void *ocmem_base;
+
+#define OCMEM_V1_REGIONS 3
+#define OCMEM_V1_MACROS 8
+
+#define OC_HW_VERS (0x0)
+#define OC_HW_PROFILE (0x4)
+#define OC_GEN_STATUS (0xC)
+#define OC_PSGSC_STATUS (0x38)
+#define OC_PSGSC_CTL (0x3C)
+#define OC_REGION_CTL (0x1000)
+
+#define NUM_PORTS_MASK (0xF << 0)
+#define NUM_PORTS_SHIFT (0)
+
+#define NUM_MACROS_MASK (0xF << 8)
+#define NUM_MACROS_SHIFT (8)
+
+#define INTERLEAVING_MASK (0x1 << 17)
+#define INTERLEAVING_SHIFT (17)
+
+/* Power states of each memory macro */
+#define PASSTHROUGH (0x0)
+#define CORE_ON (0x2)
+#define PERI_ON (0x1)
+#define CLK_OFF (0x4)
+#define MACRO_ON (CORE_ON|PERI_ON)
+#define MACRO_SLEEP_RETENTION (CLK_OFF|CORE_ON)
+#define MACRO_SLEEP_RETENTION_PERI_ON (CLK_OFF|MACRO_ON)
+#define MACRO_OFF (CLK_OFF)
+
+#define M_PSCGC_CTL_n(x) (0x7 << ((x) * 4))
+
+#define PSCGC_CTL_IDX(x) ((x) * 0x4)
+#define PSCGC_CTL_n(x) (OC_PSGSC_CTL + (PSCGC_CTL_IDX(x)))
+
+/* Power states of each ocmem region */
+#define REGION_NORMAL_PASSTHROUGH 0x00000000
+#define REGION_FORCE_PERI_ON 0x00001111
+#define REGION_FORCE_CORE_ON 0x00002222
+#define REGION_FORCE_ALL_ON 0x00003333
+#define REGION_SLEEP_NO_RETENTION 0x00004444
+#define REGION_SLEEP_PERI_OFF 0x00006666
+#define REGION_SLEEP_PERI_ON 0x00007777
+
+#define REGION_DEFAULT_OFF REGION_SLEEP_NO_RETENTION
+#define REGION_DEFAULT_ON REGION_FORCE_ALL_ON
+#define REGION_DEFAULT_RETENTION REGION_SLEEP_PERI_OFF
+
+enum rpm_macro_state {
+	rpm_macro_off = 0x0,
+	rpm_macro_retain,
+	rpm_macro_on,
+};
+
+static int rpm_write(unsigned long val, unsigned id);
+
+static inline unsigned hw_macro_state(unsigned region_state)
+{
+	unsigned macro_state;
+
+	switch (region_state) {
+	case REGION_DEFAULT_ON:
+		macro_state = MACRO_ON;
+		break;
+	case REGION_DEFAULT_OFF:
+		macro_state = MACRO_OFF;
+		break;
+	case REGION_DEFAULT_RETENTION:
+		macro_state = MACRO_SLEEP_RETENTION;
+		break;
+	default:
+		macro_state = MACRO_OFF;
+		break;
+	}
+	return macro_state;
+}
+
+static inline unsigned rpm_macro_state(unsigned hw_macro_state)
+{
+	unsigned macro_state;
+
+	switch (hw_macro_state) {
+	case MACRO_ON:
+		macro_state = rpm_macro_on;
+		break;
+	case MACRO_OFF:
+		macro_state = rpm_macro_off;
+		break;
+	case MACRO_SLEEP_RETENTION:
+		macro_state = rpm_macro_retain;
+		break;
+	default:
+		macro_state = rpm_macro_off;
+		break;
+	}
+	return macro_state;
+}
+
+/*
+ * Generic wrapper that sets the region state, either by a direct
+ * register write or through the appropriate RPM call.
+ */
+/* Must be called with region mutex held */
+static int commit_region_state(unsigned region_num)
+{
+	int rc = -1;
+	unsigned new_state;
+
+	if (region_num >= num_regions)
+		return -EINVAL;
+
+	new_state = region_ctrl[region_num].r_state;
+	pr_debug("ocmem: commit region (%d) new state %x\n", region_num,
+								new_state);
+	if (rpm_power_control)
+		rc = rpm_write(new_state, region_num);
+	else
+		rc = ocmem_write(new_state,
+					ocmem_base + PSCGC_CTL_n(region_num));
+	return rc;
+}
+
+/* Returns the current state of an OCMEM region */
+/* Must be called with region mutex held */
+static int read_region_state(unsigned region_num)
+{
+	int state;
+
+	pr_debug("rpm_get_region_state: #: %d\n", region_num);
+
+	if (region_num >= num_regions)
+		return -EINVAL;
+
+	if (rpm_power_control)
+		state = region_ctrl[region_num].r_state;
+	else
+		state = ocmem_read(ocmem_base + PSCGC_CTL_n(region_num));
+
+	pr_debug("ocmem: region (%d) state %x\n", region_num, state);
+
+	return state;
+}
+
+/* Returns the current state of an OCMEM macro that belongs to a region */
+static int read_macro_state(unsigned region_num, unsigned macro_num)
+{
+	int state;
+
+	if (macro_num >= num_banks)
+		return -EINVAL;
+
+	state = read_region_state(region_num);
+
+	if (state < 0)
+		return -EINVAL;
+
+	state &= M_PSCGC_CTL_n(macro_num);
+	state = state >> (macro_num * 4);
+
+	pr_debug("rpm_get_macro_state: macro (%d) region (%d) state %x\n",
+			macro_num, region_num, state);
+
+	return state;
+}
+
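+/*
+ * Record a single client's power vote for one macro by adjusting that
+ * client's ON/retention reference counts.
+ */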
+static int apply_macro_vote(int id, unsigned region_num,
+				unsigned macro_num, int new_state)
+{
+	struct ocmem_hw_macro *m = NULL;
+	struct ocmem_hw_region *region = NULL;
+
+	if (region_num >= num_regions)
+		return -EINVAL;
+
+	if (macro_num >= num_banks)
+		return -EINVAL;
+
+	region = &region_ctrl[region_num];
+
+	m = &region->macro[macro_num];
+
+	pr_debug("m (%d): curr state %x votes (on: %d retain %d) new state %x\n",
+			macro_num, m->m_state,
+			atomic_read(&m->m_on[id]),
+			atomic_read(&m->m_retain[id]),
+			new_state);
+
+	switch (m->m_state) {
+	case MACRO_OFF:
+		if (new_state == MACRO_ON)
+			atomic_inc(&m->m_on[id]);
+		break;
+	case MACRO_ON:
+		if (new_state == MACRO_OFF) {
+			atomic_dec(&m->m_on[id]);
+		} else if (new_state == MACRO_SLEEP_RETENTION) {
+			atomic_inc(&m->m_retain[id]);
+			atomic_dec(&m->m_on[id]);
+		}
+		break;
+	case MACRO_SLEEP_RETENTION:
+		if (new_state == MACRO_OFF) {
+			atomic_dec(&m->m_retain[id]);
+		} else if (new_state == MACRO_ON) {
+			atomic_inc(&m->m_on[id]);
+			atomic_dec(&m->m_retain[id]);
+		}
+		break;
+	}
+
+	pr_debug("macro (%d) region (%d) votes for %d (on: %d retain %d)\n",
+				macro_num, region_num, id,
+				atomic_read(&m->m_on[id]),
+				atomic_read(&m->m_retain[id]));
+	return 0;
+}
+
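+/*
+ * Fold all client votes for a macro into one state: any ON vote wins,
+ * otherwise any retention vote, otherwise the macro is turned off.
+ */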
+static int aggregate_macro_state(unsigned region_num, unsigned macro_num)
+{
+	struct ocmem_hw_macro *m = NULL;
+	struct ocmem_hw_region *region = NULL;
+	int i = 0;
+	/* The default is for the macro to be OFF */
+	unsigned m_state = MACRO_OFF;
+
+	if (region_num >= num_regions)
+		return -EINVAL;
+
+	if (macro_num >= num_banks)
+		return -EINVAL;
+
+	region = &region_ctrl[region_num];
+	m = &region->macro[macro_num];
+
+	for (i = 0; i < OCMEM_CLIENT_MAX; i++) {
+		if (atomic_read(&m->m_on[i]) > 0) {
+			/* at least one client voted for ON state */
+			m_state = MACRO_ON;
+			goto done_aggregation;
+		} else if (atomic_read(&m->m_retain[i]) > 0) {
+			m_state = MACRO_SLEEP_RETENTION;
+			/* continue and examine votes of other clients */
+		}
+	}
+done_aggregation:
+	m->m_state = m_state;
+	pr_debug("macro (%d) region (%d) aggregated state %x\n", macro_num,
+						region_num, m->m_state);
+	return 0;
+}
+
+static int aggregate_region_state(unsigned region_num)
+{
+	struct ocmem_hw_region *region = NULL;
+	unsigned r_state;
+	unsigned i = 0;
+
+	if (region_num >= num_regions)
+		return -EINVAL;
+
+	region = &region_ctrl[region_num];
+	r_state = REGION_DEFAULT_OFF;
+
+	/* In wide mode all macros must have the same state */
+	if (region->mode == WIDE_MODE) {
+		for (i = 0; i < region->num_macros; i++) {
+			if (region->macro[i].m_state == MACRO_ON) {
+				r_state = REGION_DEFAULT_ON;
+				break;
+			} else if (region->macro[i].m_state ==
+						MACRO_SLEEP_RETENTION) {
+				r_state = REGION_DEFAULT_RETENTION;
+			}
+		}
+	} else {
+	/* In narrow mode each macro is allowed to be in a different state */
+	/* The region mode is simply the collection of all macro states */
+		for (i = 0; i < region->num_macros; i++) {
+			r_state &= ~M_PSCGC_CTL_n(i);
+			r_state |= region->macro[i].m_state << (i * 4);
+		}
+	}
+
+	pr_debug("region (%d) curr state (%x) aggregated state (%x)\n",
+			region_num, region->r_state, r_state);
+	region->r_state = r_state;
+	return 0;
+}
+
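+/*
+ * Build one RPM key/value pair per macro of the region and send them as
+ * a single request; REGION_DEFAULT_ON/OFF force every macro fully on or
+ * off regardless of its current state.
+ */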
+static int rpm_write(unsigned long val, unsigned id)
+{
+	int i = 0;
+	int ret = 0;
+	struct ocmem_hw_region *region;
+
+	region = &region_ctrl[id];
+
+	for (i = 0; i < region->num_macros; i++) {
+		unsigned macro_state;
+		unsigned rpm_state;
+
+		macro_state = read_macro_state(id, i);
+		rpm_state = rpm_macro_state(macro_state);
+
+		if (val == REGION_DEFAULT_ON) {
+			pr_debug("macro (%d) region (%d) -> active\n",
+				i, id);
+			rpm_state = rpm_macro_on;
+		}
+
+		if (val == REGION_DEFAULT_OFF) {
+			pr_debug("macro (%d) region (%d) -> off\n",
+				i, id);
+			rpm_state = rpm_macro_off;
+		}
+
+		ret = msm_rpm_add_kvp_data(region->rpm_req, i,
+						(u8 *) &rpm_state, 4);
+
+		if (ret < 0) {
+			pr_err("ocmem: Error adding key %d val %d on rsc %d\n",
+					i, rpm_state, id);
+			return -EINVAL;
+		}
+	}
+
+	ret = msm_rpm_send_request(region->rpm_req);
+
+	if (ret < 0) {
+		pr_err("ocmem: Error sending RPM request\n");
+		return -EINVAL;
+	}
+
+	pr_debug("Transmit request to rpm for region %d\n", id);
+	return 0;
+}
+
+
+#if defined(CONFIG_MSM_OCMEM_POWER_DEBUG)
+
+static int read_hw_region_state(unsigned region_num)
+{
+	int state;
+
+	pr_debug("rpm_get_region_state: #: %d\n", region_num);
+
+	if (region_num >= num_regions)
+		return -EINVAL;
+
+	state = ocmem_read(ocmem_base + PSCGC_CTL_n(region_num));
+
+	pr_debug("ocmem: region (%d) state %x\n", region_num, state);
+
+	return state;
+}
+
+int ocmem_region_toggle(unsigned int r_num)
+{
+	unsigned reboot_state = ~0x0;
+	unsigned m_num = 0;
+
+	mutex_lock(&region_ctrl_lock);
+	/* Turn on each macro at boot for quick hw sanity check */
+	reboot_state = read_hw_region_state(r_num);
+
+	if (reboot_state != REGION_DEFAULT_OFF) {
+		pr_err("Region %d not in power off state (%x)\n",
+				r_num, reboot_state);
+		goto toggle_fail;
+	}
+
+	for (m_num = 0; m_num < num_banks; m_num++) {
+		apply_macro_vote(0, r_num, m_num, MACRO_ON);
+		aggregate_macro_state(r_num, m_num);
+	}
+
+	aggregate_region_state(r_num);
+	commit_region_state(r_num);
+
+	reboot_state = read_hw_region_state(r_num);
+
+	if (reboot_state != REGION_DEFAULT_ON) {
+		pr_err("Failed to power on Region %d(state:%x)\n",
+				r_num, reboot_state);
+		goto toggle_fail;
+	}
+
+	/* Turn off all memory macros again */
+
+	for (m_num = 0; m_num < num_banks; m_num++) {
+		apply_macro_vote(0, r_num, m_num, MACRO_OFF);
+		aggregate_macro_state(r_num, m_num);
+	}
+
+	aggregate_region_state(r_num);
+	commit_region_state(r_num);
+
+	reboot_state = read_hw_region_state(r_num);
+
+	if (reboot_state != REGION_DEFAULT_OFF) {
+		pr_err("Failed to power off Region %d(state:%x)\n",
+				r_num, reboot_state);
+		goto toggle_fail;
+	}
+	mutex_unlock(&region_ctrl_lock);
+	return 0;
+
+toggle_fail:
+	mutex_unlock(&region_ctrl_lock);
+	return -EINVAL;
+}
+
+int memory_is_off(unsigned int num)
+{
+	if (read_hw_region_state(num) == REGION_DEFAULT_OFF)
+		return 1;
+	else
+		return 0;
+}
+
+#else
+int ocmem_region_toggle(unsigned int region_num)
+{
+	return 0;
+}
+
+int memory_is_off(unsigned int num)
+{
+	return 0;
+}
+#endif /* CONFIG_MSM_OCMEM_POWER_DEBUG */
+
+/*
+ * Memory Macro Power Transition Sequences
+ * Normal to Sleep With Retention:
+ *	REGION_DEFAULT_ON -> REGION_DEFAULT_RETENTION
+ * Sleep With Retention to Normal:
+ *	REGION_DEFAULT_RETENTION -> REGION_FORCE_CORE_ON -> REGION_DEFAULT_ON
+ * Normal to OFF:
+ *	REGION_DEFAULT_ON -> REGION_DEFAULT_OFF
+ * OFF to Normal:
+ *	REGION_DEFAULT_OFF -> REGION_DEFAULT_ON
+ */
+
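+/*
+ * Vote the macros covering [offset, offset + len) into new_state: for
+ * each affected region apply the per-macro votes, re-aggregate and
+ * commit the resulting region state.
+ */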
+static int switch_power_state(int id, unsigned long offset, unsigned long len,
+			unsigned new_state)
+{
+	unsigned region_start = num_regions;
+	unsigned region_end = num_regions;
+	unsigned curr_state = 0x0;
+	int i = 0;
+	int j = 0;
+	unsigned start_m = num_banks;
+	unsigned end_m = num_banks;
+	unsigned long region_offset = 0;
+
+	if (offset < 0)
+		return -EINVAL;
+
+	if (len < macro_size)
+		return -EINVAL;
+
+
+	pr_debug("ocmem: power_transition to %x for client %d\n", new_state,
+							id);
+
+	region_start = offset / region_size;
+	region_end = (offset + len - 1) / region_size;
+
+	pr_debug("ocmem: region start %u end %u\n", region_start, region_end);
+
+	if (region_start >= num_regions || region_end >= num_regions)
+		return -EINVAL;
+
+	mutex_lock(&region_ctrl_lock);
+
+	for (i = region_start; i <= region_end; i++) {
+
+		curr_state = read_region_state(i);
+
+		switch (curr_state) {
+		case REGION_DEFAULT_OFF:
+			if (new_state != REGION_DEFAULT_ON)
+				goto invalid_transition;
+			break;
+		case REGION_DEFAULT_RETENTION:
+			if (new_state != REGION_DEFAULT_ON)
+				goto invalid_transition;
+			break;
+		default:
+			break;
+		}
+
+		if (len >= region_size) {
+			pr_debug("switch: entire region (%d)\n", i);
+			start_m = 0;
+			end_m = num_banks - 1;
+		} else {
+			region_offset = offset - (i * region_size);
+			start_m = region_offset / macro_size;
+			end_m = (region_offset + len - 1) / macro_size;
+			pr_debug("switch: macro (%u to %u)\n", start_m, end_m);
+		}
+
+		for (j = start_m; j <= end_m; j++) {
+			pr_debug("vote: macro (%d) region (%d)\n", j, i);
+			apply_macro_vote(id, i, j,
+				hw_macro_state(new_state));
+			aggregate_macro_state(i, j);
+		}
+		aggregate_region_state(i);
+		commit_region_state(i);
+		len -= region_size;
+
+		/* If we voted ON/retain the banks must never be OFF */
+		if (new_state != REGION_DEFAULT_OFF) {
+			if (memory_is_off(i)) {
+				pr_err("ocmem: Accessing memory during sleep\n");
+				WARN_ON(1);
+			}
+		}
+
+	}
+	mutex_unlock(&region_ctrl_lock);
+	return 0;
+invalid_transition:
+	mutex_unlock(&region_ctrl_lock);
+	pr_err("ocmem_core: Invalid state transition detected for %d\n", id);
+	pr_err("ocmem_core: Offset %lx Len %lx curr_state %x new_state %x\n",
+			offset, len, curr_state, new_state);
+	WARN_ON(1);
+	return -EINVAL;
+}
+
+/* Interfaces invoked from the scheduler */
+int ocmem_memory_off(int id, unsigned long offset, unsigned long len)
+{
+	return switch_power_state(id, offset, len, REGION_DEFAULT_OFF);
+}
+
+int ocmem_memory_on(int id, unsigned long offset, unsigned long len)
+{
+	return switch_power_state(id, offset, len, REGION_DEFAULT_ON);
+}
+
+int ocmem_memory_retain(int id, unsigned long offset, unsigned long len)
+{
+	return switch_power_state(id, offset, len, REGION_DEFAULT_RETENTION);
+}
+
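+/*
+ * Read the OCMEM hardware profile, validate it against the platform
+ * data, allocate the region/macro bookkeeping, create RPM requests when
+ * RPM power control is in use and sanity-toggle every region.
+ */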
+int ocmem_core_init(struct platform_device *pdev)
+{
+	struct device   *dev = &pdev->dev;
+	struct ocmem_plat_data *pdata = NULL;
+	unsigned hw_ver;
+	bool interleaved;
+	unsigned i, j, k;
+	unsigned rsc_type = 0;
+
+	pdata = platform_get_drvdata(pdev);
+	ocmem_base = pdata->reg_base;
+
+	hw_ver = ocmem_read(ocmem_base + OC_HW_PROFILE);
+
+	if (pdata->nr_regions != OCMEM_V1_REGIONS) {
+		pr_err("Invalid number of regions (%d)\n", pdata->nr_regions);
+		goto hw_not_supported;
+	}
+
+	num_macros = (hw_ver & NUM_MACROS_MASK) >> NUM_MACROS_SHIFT;
+	num_ports = (hw_ver & NUM_PORTS_MASK) >> NUM_PORTS_SHIFT;
+
+	if (num_macros != OCMEM_V1_MACROS) {
+		pr_err("Invalid number of macros (%d)\n", num_macros);
+		goto hw_not_supported;
+	}
+
+	interleaved = (hw_ver & INTERLEAVING_MASK) >> INTERLEAVING_SHIFT;
+
+	if (interleaved == false) {
+		pr_err("Interleaving is disabled\n");
+		goto hw_not_supported;
+	}
+
+	num_regions = pdata->nr_regions;
+
+	pdata->interleaved = true;
+	pdata->nr_macros = num_macros;
+	pdata->nr_ports = num_ports;
+	macro_size = SZ_64K;
+	region_size = macro_size * num_ports;
+	num_banks = num_ports / 2;
+	rsc_type = pdata->rpm_rsc_type;
+
+	pr_debug("ocmem_core: ports %d regions %d macros %d interleaved %d\n",
+				num_ports, num_regions, num_macros,
+				interleaved);
+
+	region_ctrl = devm_kzalloc(dev, sizeof(struct ocmem_hw_region)
+					 * num_regions, GFP_KERNEL);
+
+	if (!region_ctrl) {
+		pr_err("ocmem: Unable to allocate memory\n");
+		return -ENOMEM;
+	}
+
+	mutex_init(&region_ctrl_lock);
+
+	for (i = 0 ; i < num_regions; i++) {
+		struct ocmem_hw_region *region = &region_ctrl[i];
+		struct msm_rpm_request *req = NULL;
+		region->interleaved = interleaved;
+		region->mode = MODE_DEFAULT;
+		region->r_state = REGION_DEFAULT_OFF;
+		region->num_macros = num_banks;
+
+		region->macro = devm_kzalloc(dev,
+					sizeof(struct ocmem_hw_macro) *
+						num_banks, GFP_KERNEL);
+		if (!region->macro) {
+			pr_err("ocmem: Unable to allocate memory\n");
+			return -ENOMEM;
+		}
+
+		for (j = 0; j < num_banks; j++) {
+			struct ocmem_hw_macro *m = &region->macro[j];
+			m->m_state = MACRO_OFF;
+			for (k = 0; k < OCMEM_CLIENT_MAX; k++) {
+				atomic_set(&m->m_on[k], 0);
+				atomic_set(&m->m_retain[k], 0);
+			}
+		}
+
+		if (pdata->rpm_pwr_ctrl) {
+			rpm_power_control = true;
+			req = msm_rpm_create_request(MSM_RPM_CTX_ACTIVE_SET,
+					rsc_type, i, num_banks);
+
+			if (!req) {
+				pr_err("Unable to create RPM request\n");
+				return -EINVAL;
+			}
+
+			pr_debug("rpm request type %x (rsc: %d) with %d elements\n",
+						rsc_type, i, num_banks);
+
+			region->rpm_req = req;
+		}
+
+		if (ocmem_region_toggle(i)) {
+			pr_err("Failed to verify region %d\n", i);
+			goto hw_not_supported;
+		}
+
+	}
+	return 0;
+hw_not_supported:
+	pr_err("Unsupported OCMEM h/w configuration %x\n", hw_ver);
+	return -EINVAL;
+}
diff --git a/arch/arm/mach-msm/ocmem_rdm.c b/arch/arm/mach-msm/ocmem_rdm.c
index 6b93d04..5649021 100644
--- a/arch/arm/mach-msm/ocmem_rdm.c
+++ b/arch/arm/mach-msm/ocmem_rdm.c
@@ -109,19 +109,6 @@
 	unsigned int ctrl;
 } dm_table[RDM_MAX_ENTRIES];
 
-/* Wrapper that will shadow these values later */
-static int ocmem_read(void *at)
-{
-	return readl_relaxed(at);
-}
-
-/* Wrapper that will shadow these values later */
-static int ocmem_write(unsigned long val, void *at)
-{
-	writel_relaxed(val, at);
-	return 0;
-}
-
 static inline int client_ctrl_id(int id)
 {
 	return (id == OCMEM_SENSORS) ? 1 : 0;
diff --git a/arch/arm/mach-msm/ocmem_sched.c b/arch/arm/mach-msm/ocmem_sched.c
index f6d066d..70e6860 100644
--- a/arch/arm/mach-msm/ocmem_sched.c
+++ b/arch/arm/mach-msm/ocmem_sched.c
@@ -1083,6 +1083,7 @@
 static int process_grow(struct ocmem_req *req)
 {
 	int rc = 0;
+	unsigned long offset = 0;
 
 	/* Attempt to grow the region */
 	rc = do_grow(req);
@@ -1097,6 +1098,15 @@
 			return -EINVAL;
 	}
 
+	offset = phys_to_offset(req->req_start);
+
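+	/* Switch on the memory macros backing the grown region */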
+	rc = ocmem_memory_on(req->owner, offset, req->req_sz);
+
+	if (rc < 0) {
+		pr_err("Failed to switch ON memory macros\n");
+		goto power_ctl_error;
+	}
+
 	/* Notify the client about the buffer growth */
 	rc = dispatch_notification(req->owner, OCMEM_ALLOC_GROW, req->buffer);
 	if (rc < 0) {
@@ -1105,6 +1115,8 @@
 		BUG();
 	}
 	return 0;
+power_ctl_error:
+	return -EINVAL;
 }
 
 static int do_shrink(struct ocmem_req *req, unsigned long shrink_size)
@@ -1187,6 +1199,7 @@
 {
 	struct ocmem_req *req = NULL;
 	struct ocmem_buf *buffer = NULL;
+	unsigned long offset = 0;
 	int rc = 0;
 
 	if (is_blocked(id)) {
@@ -1211,6 +1224,20 @@
 			return -EINVAL;
 	}
 
+
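+	/* Switch off the macros backing this request before freeing it */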
+	if (req->req_sz != 0) {
+
+		offset = phys_to_offset(req->req_start);
+
+		rc = ocmem_memory_off(req->owner, offset, req->req_sz);
+
+		if (rc < 0) {
+			pr_err("Failed to switch OFF memory macros\n");
+			return -EINVAL;
+		}
+
+	}
+
 	rc = do_free(req);
 
 	if (rc < 0)
@@ -1244,7 +1271,6 @@
 		event = (rc == 0) ? OCMEM_MAP_DONE : OCMEM_MAP_FAIL;
 	else
 		event = (rc == 0) ? OCMEM_UNMAP_DONE : OCMEM_UNMAP_FAIL;
-
 	up_write(&req->rw_sem);
 	kfree(work_data);
 	dispatch_notification(id, event, buffer);
@@ -1300,6 +1326,7 @@
 		goto transfer_out_error;
 	}
 
+
 	return 0;
 
 transfer_out_error:
@@ -1538,6 +1565,7 @@
 	struct ocmem_req *req = NULL;
 	struct ocmem_buf *buffer = NULL;
 	int rc = 0;
+	unsigned long offset = 0;
 
 	/* sanity checks */
 	if (is_blocked(id)) {
@@ -1579,8 +1607,21 @@
 			goto map_error;
 	}
 
+	if (req->req_sz != 0) {
+
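+		/* Power the memory macros backing this request back on */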
+		offset = phys_to_offset(req->req_start);
+
+		rc = ocmem_memory_on(req->owner, offset, req->req_sz);
+
+		if (rc < 0) {
+			pr_err("Failed to switch ON memory macros\n");
+			goto power_ctl_error;
+		}
+	}
+
 	return 0;
 
+power_ctl_error:
 map_error:
 	handle->req = NULL;
 	do_free(req);
@@ -1595,6 +1636,7 @@
 	struct ocmem_handle *handle = NULL;
 	int rc = 0;
 	int id = req->owner;
+	unsigned long offset = 0;
 
 	handle = req_to_handle(req);
 	BUG_ON(handle == NULL);
@@ -1611,6 +1653,18 @@
 			goto map_error;
 	}
 
+	if (req->req_sz != 0) {
+
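+		/* Power on the macros before notifying the client of the growth */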
+		offset = phys_to_offset(req->req_start);
+
+		rc = ocmem_memory_on(req->owner, offset, req->req_sz);
+
+		if (rc < 0) {
+			pr_err("Failed to switch ON memory macros\n");
+			goto power_ctl_error;
+		}
+	}
+
 	/* Notify the client about the buffer growth */
 	rc = dispatch_notification(id, OCMEM_ALLOC_GROW, req->buffer);
 	if (rc < 0) {
@@ -1620,6 +1674,7 @@
 	}
 	return 0;
 
+power_ctl_error:
 map_error:
 	handle->req = NULL;
 	do_free(req);
diff --git a/arch/arm/mach-msm/pcie.c b/arch/arm/mach-msm/pcie.c
index d954b53..709c8e8 100644
--- a/arch/arm/mach-msm/pcie.c
+++ b/arch/arm/mach-msm/pcie.c
@@ -619,6 +619,7 @@
 	msm_pcie_dev.pdev = pdev;
 	pdata = pdev->dev.platform_data;
 	msm_pcie_dev.gpio = pdata->gpio;
+	msm_pcie_dev.wake_n = pdata->wake_n;
 	msm_pcie_dev.vreg = msm_pcie_vreg_info;
 	msm_pcie_dev.clk = msm_pcie_clk_info;
 	msm_pcie_dev.res = msm_pcie_res_info;
@@ -706,6 +707,26 @@
 DECLARE_PCI_FIXUP_EARLY(PCIE_VENDOR_ID_RCP, PCIE_DEVICE_ID_RCP,
 			msm_pcie_fixup_early);
 
+/* enable wake_n interrupt during suspend */
+static void msm_pcie_fixup_suspend(struct pci_dev *dev)
+{
+	PCIE_DBG("enabling wake_n\n");
+	if (dev->pcie_type == PCI_EXP_TYPE_ROOT_PORT)
+		enable_irq(msm_pcie_dev.wake_n);
+}
+DECLARE_PCI_FIXUP_SUSPEND(PCIE_VENDOR_ID_RCP, PCIE_DEVICE_ID_RCP,
+			  msm_pcie_fixup_suspend);
+
+/* disable wake_n interrupt when system is not in suspend */
+static void msm_pcie_fixup_resume(struct pci_dev *dev)
+{
+	PCIE_DBG("disabling wake_n\n");
+	if (dev->pcie_type == PCI_EXP_TYPE_ROOT_PORT)
+		disable_irq(msm_pcie_dev.wake_n);
+}
+DECLARE_PCI_FIXUP_RESUME(PCIE_VENDOR_ID_RCP, PCIE_DEVICE_ID_RCP,
+			 msm_pcie_fixup_resume);
+
 /*
  * actual physical (BAR) address of the device resources starts from
  * MSM_PCIE_DEV_BAR_ADDR; the system axi address for the device resources starts
diff --git a/arch/arm/mach-msm/pcie.h b/arch/arm/mach-msm/pcie.h
index fba6b11..d7cce3e 100644
--- a/arch/arm/mach-msm/pcie.h
+++ b/arch/arm/mach-msm/pcie.h
@@ -68,6 +68,8 @@
 	uint32_t                      axi_bar_end;
 
 	struct resource               dev_mem_res;
+
+	uint32_t                      wake_n;
 };
 
 extern uint32_t msm_pcie_irq_init(struct msm_pcie_dev_t *dev);
diff --git a/arch/arm/mach-msm/pcie_irq.c b/arch/arm/mach-msm/pcie_irq.c
index d915561..5a44a17 100644
--- a/arch/arm/mach-msm/pcie_irq.c
+++ b/arch/arm/mach-msm/pcie_irq.c
@@ -39,7 +39,13 @@
 
 static DECLARE_BITMAP(msi_irq_in_use, NR_PCIE_MSI_IRQS);
 
-irqreturn_t handle_msi_irq(int irq, void *data)
+static irqreturn_t handle_wake_irq(int irq, void *data)
+{
+	PCIE_DBG("\n");
+	return IRQ_HANDLED;
+}
+
+static irqreturn_t handle_msi_irq(int irq, void *data)
 {
 	int i, j;
 	unsigned long val;
@@ -87,15 +93,32 @@
 	/* register handler for physical MSI interrupt line */
 	rc = request_irq(PCIE20_INT_MSI, handle_msi_irq, IRQF_TRIGGER_RISING,
 			 "msm_pcie_msi", dev);
-	if (rc)
+	if (rc) {
 		pr_err("Unable to allocate msi interrupt\n");
+		goto out;
+	}
 
+	/* register handler for PCIE_WAKE_N interrupt line */
+	rc = request_irq(dev->wake_n, handle_wake_irq, IRQF_TRIGGER_FALLING,
+			 "msm_pcie_wake", dev);
+	if (rc) {
+		pr_err("Unable to allocate wake interrupt\n");
+		free_irq(PCIE20_INT_MSI, dev);
+		goto out;
+	}
+
+	enable_irq_wake(dev->wake_n);
+
+	/* PCIE_WAKE_N should be enabled only during system suspend */
+	disable_irq(dev->wake_n);
+out:
 	return rc;
 }
 
 void __exit msm_pcie_irq_deinit(struct msm_pcie_dev_t *dev)
 {
 	free_irq(PCIE20_INT_MSI, dev);
+	free_irq(dev->wake_n, dev);
 }
 
 void msm_pcie_destroy_irq(unsigned int irq)
diff --git a/arch/arm/mach-msm/perf_event_msm_krait_l2.c b/arch/arm/mach-msm/perf_event_msm_krait_l2.c
index 3635572..103eef0 100644
--- a/arch/arm/mach-msm/perf_event_msm_krait_l2.c
+++ b/arch/arm/mach-msm/perf_event_msm_krait_l2.c
@@ -532,7 +532,7 @@
 {
 	krait_l2_pmu.plat_device = pdev;
 
-	if (!armpmu_register(&krait_l2_pmu, "kraitl2", -1))
+	if (!armpmu_register(&krait_l2_pmu, "msm-l2", -1))
 		pmu_type = krait_l2_pmu.pmu.type;
 
 	return 0;
diff --git a/arch/arm/mach-msm/perf_event_msm_l2.c b/arch/arm/mach-msm/perf_event_msm_l2.c
index aae2552..2ad36df 100644
--- a/arch/arm/mach-msm/perf_event_msm_l2.c
+++ b/arch/arm/mach-msm/perf_event_msm_l2.c
@@ -877,7 +877,7 @@
 {
 	scorpion_l2_pmu.plat_device = pdev;
 
-	if (!armpmu_register(&scorpion_l2_pmu, "scorpionl2", -1))
+	if (!armpmu_register(&scorpion_l2_pmu, "msm-l2", -1))
 		pmu_type = scorpion_l2_pmu.pmu.type;
 
 	return 0;
diff --git a/arch/arm/mach-msm/pil-pronto.c b/arch/arm/mach-msm/pil-pronto.c
index 58d5176..01cdb0b 100644
--- a/arch/arm/mach-msm/pil-pronto.c
+++ b/arch/arm/mach-msm/pil-pronto.c
@@ -117,11 +117,12 @@
 	void __iomem *base = drv->base;
 	unsigned long start_addr = drv->start_addr;
 
-	/* Deassert reset to Pronto */
+	/* Deassert reset to subsystem and wait for propagation */
 	reg = readl_relaxed(drv->reset_base);
 	reg &= ~CLK_CTL_WCNSS_RESTART_BIT;
 	writel_relaxed(reg, drv->reset_base);
 	mb();
+	udelay(2);
 
 	/* Configure boot address */
 	writel_relaxed(start_addr >> 16, base +
@@ -206,11 +207,12 @@
 	mb();
 	usleep_range(1000, 2000);
 
-	/* Deassert reset to Pronto */
+	/* Deassert reset to subsystem and wait for propagation */
 	reg = readl_relaxed(drv->reset_base);
 	reg &= ~CLK_CTL_WCNSS_RESTART_BIT;
 	writel_relaxed(reg, drv->reset_base);
 	mb();
+	udelay(2);
 
 	return 0;
 }
diff --git a/arch/arm/mach-msm/pil-q6v5-mss.c b/arch/arm/mach-msm/pil-q6v5-mss.c
index 62685ca..5c9c3c4 100644
--- a/arch/arm/mach-msm/pil-q6v5-mss.c
+++ b/arch/arm/mach-msm/pil-q6v5-mss.c
@@ -139,8 +139,10 @@
 	struct q6v5_data *drv = dev_get_drvdata(pil->dev);
 	int ret;
 
+	/* Deassert reset to subsystem and wait for propagation */
 	writel_relaxed(0, drv->restart_reg);
 	mb();
+	udelay(2);
 
 	/*
 	 * Bring subsystem out of reset and enable required
@@ -235,7 +237,7 @@
 	if (IS_ERR(drv->vreg))
 		return PTR_ERR(drv->vreg);
 
-	ret = regulator_set_voltage(drv->vreg, 1150000, 1150000);
+	ret = regulator_set_voltage(drv->vreg, 1050000, 1050000);
 	if (ret)
 		dev_err(&pdev->dev, "Failed to set regulator's voltage.\n");
 
diff --git a/arch/arm/mach-msm/scm-pas.c b/arch/arm/mach-msm/scm-pas.c
index 4096d9c..55ae2f8 100644
--- a/arch/arm/mach-msm/scm-pas.c
+++ b/arch/arm/mach-msm/scm-pas.c
@@ -94,7 +94,7 @@
 {
 	int ret = 0;
 
-	if (!scm_perf_client || !scm_bus_clk)
+	if (!scm_perf_client)
 		return -EINVAL;
 
 	mutex_lock(&scm_pas_bw_mutex);
@@ -102,7 +102,7 @@
 		ret = msm_bus_scale_client_update_request(scm_perf_client, 1);
 		if (ret) {
 			pr_err("bandwidth request failed (%d)\n", ret);
-		} else {
+		} else if (scm_bus_clk) {
 			ret = clk_prepare_enable(scm_bus_clk);
 			if (ret)
 				pr_err("clock enable failed\n");
@@ -121,7 +121,8 @@
 	mutex_lock(&scm_pas_bw_mutex);
 	if (scm_pas_bw_count-- == 1) {
 		msm_bus_scale_client_update_request(scm_perf_client, 0);
-		clk_disable_unprepare(scm_bus_clk);
+		if (scm_bus_clk)
+			clk_disable_unprepare(scm_bus_clk);
 	}
 	mutex_unlock(&scm_pas_bw_mutex);
 }
@@ -190,16 +191,23 @@
 
 static int __init scm_pas_init(void)
 {
+	if (cpu_is_msm8974()) {
+		scm_pas_bw_tbl[0].vectors[0].src = MSM_BUS_MASTER_CRYPTO_CORE0;
+		scm_pas_bw_tbl[1].vectors[0].src = MSM_BUS_MASTER_CRYPTO_CORE0;
+	} else {
+		scm_bus_clk = clk_get_sys("scm", "bus_clk");
+		if (!IS_ERR(scm_bus_clk)) {
+			clk_set_rate(scm_bus_clk, 64000000);
+		} else {
+			scm_bus_clk = NULL;
+			pr_warn("unable to get bus clock\n");
+		}
+	}
+
 	scm_perf_client = msm_bus_scale_register_client(&scm_pas_bus_pdata);
 	if (!scm_perf_client)
 		pr_warn("unable to register bus client\n");
-	scm_bus_clk = clk_get_sys("scm", "bus_clk");
-	if (!IS_ERR(scm_bus_clk)) {
-		clk_set_rate(scm_bus_clk, 64000000);
-	} else {
-		scm_bus_clk = NULL;
-		pr_warn("unable to get bus clock\n");
-	}
+
 	return 0;
 }
 module_init(scm_pas_init);
diff --git a/drivers/base/genlock.c b/drivers/base/genlock.c
index aed71c0..f54d820 100644
--- a/drivers/base/genlock.c
+++ b/drivers/base/genlock.c
@@ -116,6 +116,7 @@
 struct genlock *genlock_create_lock(struct genlock_handle *handle)
 {
 	struct genlock *lock;
+	void *ret;
 
 	if (IS_ERR_OR_NULL(handle)) {
 		GENLOCK_LOG_ERR("Invalid handle\n");
@@ -145,8 +146,13 @@
 	 * other processes
 	 */
 
-	lock->file = anon_inode_getfile("genlock", &genlock_fops,
-		lock, O_RDWR);
+	ret = anon_inode_getfile("genlock", &genlock_fops, lock, O_RDWR);
+	if (IS_ERR_OR_NULL(ret)) {
+		GENLOCK_LOG_ERR("Unable to create lock inode\n");
+		kfree(lock);
+		return ret;
+	}
+	lock->file = ret;
 
 	/* Attach the new lock to the handle */
 	handle->lock = lock;
@@ -660,12 +666,19 @@
 
 struct genlock_handle *genlock_get_handle(void)
 {
+	void *ret;
 	struct genlock_handle *handle = _genlock_get_handle();
 	if (IS_ERR(handle))
 		return handle;
 
-	handle->file = anon_inode_getfile("genlock-handle",
+	ret = anon_inode_getfile("genlock-handle",
 		&genlock_handle_fops, handle, O_RDWR);
+	if (IS_ERR_OR_NULL(ret)) {
+		GENLOCK_LOG_ERR("Unable to create handle inode\n");
+		kfree(handle);
+		return ret;
+	}
+	handle->file = ret;
 
 	return handle;
 }
diff --git a/drivers/bluetooth/hci_ath.c b/drivers/bluetooth/hci_ath.c
index 4093935..b117309 100644
--- a/drivers/bluetooth/hci_ath.c
+++ b/drivers/bluetooth/hci_ath.c
@@ -5,6 +5,7 @@
  *  power management protocol extension to H4 to support AR300x Bluetooth Chip.
  *
  *  Copyright (c) 2009-2010 Atheros Communications Inc.
+ *  Copyright (c) 2012, Code Aurora Forum. All rights reserved.
  *
  *  Acknowledgements:
  *  This file is based on hci_h4.c, which was written
@@ -35,12 +36,53 @@
 #include <linux/errno.h>
 #include <linux/ioctl.h>
 #include <linux/skbuff.h>
+#include <linux/platform_device.h>
+#include <linux/gpio.h>
 
 #include <net/bluetooth/bluetooth.h>
 #include <net/bluetooth/hci_core.h>
 
 #include "hci_uart.h"
 
+unsigned int enableuartsleep;
+module_param(enableuartsleep, uint, 0644);
+/*
+ * Global variables
+ */
+/** Global state flags */
+static unsigned long flags;
+
+/** Tasklet to respond to change in hostwake line */
+static struct tasklet_struct hostwake_task;
+
+/** Transmission timer */
+static void bluesleep_tx_timer_expire(unsigned long data);
+static DEFINE_TIMER(tx_timer, bluesleep_tx_timer_expire, 0, 0);
+
+/** Lock for state transitions */
+static spinlock_t rw_lock;
+
+#define POLARITY_LOW 0
+#define POLARITY_HIGH 1
+
+struct bluesleep_info {
+	unsigned host_wake;			/* wake up host */
+	unsigned ext_wake;			/* wake up device */
+	int host_wake_irq;
+	int irq_polarity;
+};
+
+/* 1 second timeout */
+#define TX_TIMER_INTERVAL  1
+
+/* state variable names and bit positions */
+#define BT_TXEXPIRED    0x01
+#define BT_SLEEPENABLE  0x02
+#define BT_SLEEPCMD	0x03
+
+/* global bluesleep info for the single hci device */
+static struct bluesleep_info *bsi;
+
 struct ath_struct {
 	struct hci_uart *hu;
 	unsigned int cur_sleep;
@@ -49,35 +91,30 @@
 	struct work_struct ctxtsw;
 };
 
+static void hostwake_interrupt(unsigned long data)
+{
+	printk(KERN_INFO "wakeup host\n");
+}
+
+static void modify_timer_task(void)
+{
+	spin_lock(&rw_lock);
+	mod_timer(&tx_timer, jiffies + (TX_TIMER_INTERVAL * HZ));
+	clear_bit(BT_TXEXPIRED, &flags);
+	spin_unlock(&rw_lock);
+}
+
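+/*
+ * Pulse ext_wake to wake the AR300x when the tx idle timer has expired,
+ * then re-arm the timer.
+ */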
 static int ath_wakeup_ar3k(struct tty_struct *tty)
 {
-	struct ktermios ktermios;
-	int status = tty->driver->ops->tiocmget(tty);
-
-	if (status & TIOCM_CTS)
-		return status;
-
-	/* Disable Automatic RTSCTS */
-	memcpy(&ktermios, tty->termios, sizeof(ktermios));
-	ktermios.c_cflag &= ~CRTSCTS;
-	tty_set_termios(tty, &ktermios);
-
-	/* Clear RTS first */
-	status = tty->driver->ops->tiocmget(tty);
-	tty->driver->ops->tiocmset(tty, 0x00, TIOCM_RTS);
-	mdelay(20);
-
-	/* Set RTS, wake up board */
-	status = tty->driver->ops->tiocmget(tty);
-	tty->driver->ops->tiocmset(tty, TIOCM_RTS, 0x00);
-	mdelay(20);
-
-	status = tty->driver->ops->tiocmget(tty);
-
-	/* Disable Automatic RTSCTS */
-	ktermios.c_cflag |= CRTSCTS;
-	status = tty_set_termios(tty, &ktermios);
-
+	int status = 0;
+	if (test_bit(BT_TXEXPIRED, &flags)) {
+		printk(KERN_INFO "wakeup device\n");
+		gpio_set_value(bsi->ext_wake, 1);
+		msleep(20);
+		gpio_set_value(bsi->ext_wake, 0);
+	}
+	modify_timer_task();
 	return status;
 }
 
@@ -94,12 +131,8 @@
 	tty = hu->tty;
 
 	/* verify and wake up controller */
-	if (ath->cur_sleep) {
+	if (test_bit(BT_SLEEPENABLE, &flags))
 		status = ath_wakeup_ar3k(tty);
-		if (!(status & TIOCM_CTS))
-			return;
-	}
-
 	/* Ready to send Data */
 	clear_bit(HCI_UART_SENDING, &hu->tx_state);
 	hci_uart_tx_wakeup(hu);
@@ -121,6 +154,11 @@
 	hu->priv = ath;
 	ath->hu = hu;
 
+	ath->cur_sleep = enableuartsleep;
+	if (ath->cur_sleep == 1) {
+		set_bit(BT_SLEEPENABLE, &flags);
+		modify_timer_task();
+	}
 	INIT_WORK(&ath->ctxtsw, ath_hci_uart_work);
 
 	return 0;
@@ -173,9 +211,10 @@
 	 */
 	if (bt_cb(skb)->pkt_type == HCI_COMMAND_PKT) {
 		struct hci_command_hdr *hdr = (void *)skb->data;
-
-		if (__le16_to_cpu(hdr->opcode) == HCI_OP_ATH_SLEEP)
+		if (__le16_to_cpu(hdr->opcode) == HCI_OP_ATH_SLEEP) {
+			set_bit(BT_SLEEPCMD, &flags);
 			ath->cur_sleep = skb->data[HCI_COMMAND_HDR_SIZE];
+		}
 	}
 
 	BT_DBG("hu %p skb %p", hu, skb);
@@ -201,17 +240,55 @@
 /* Recv data */
 static int ath_recv(struct hci_uart *hu, void *data, int count)
 {
-	int ret;
+	struct ath_struct *ath = hu->priv;
+	unsigned int type;
 
-	ret = hci_recv_stream_fragment(hu->hdev, data, count);
-	if (ret < 0) {
+	if (hci_recv_stream_fragment(hu->hdev, data, count) < 0)
 		BT_ERR("Frame Reassembly Failed");
-		return ret;
-	}
 
+	if (count && test_bit(BT_SLEEPCMD, &flags)) {
+		struct sk_buff *skb = hu->hdev->reassembly[0];
+
+		if (!skb) {
+			struct { char type; } *pkt;
+
+			/* Start of the frame */
+			pkt = data;
+			type = pkt->type;
+		} else
+			type = bt_cb(skb)->pkt_type;
+
+		if (type == HCI_EVENT_PKT) {
+			clear_bit(BT_SLEEPCMD, &flags);
+			printk(KERN_INFO "cur_sleep:%d\n", ath->cur_sleep);
+			if (ath->cur_sleep == 1)
+				set_bit(BT_SLEEPENABLE, &flags);
+			else
+				clear_bit(BT_SLEEPENABLE, &flags);
+		}
+		if (test_bit(BT_SLEEPENABLE, &flags))
+			modify_timer_task();
+	}
 	return count;
 }
 
+static void bluesleep_tx_timer_expire(unsigned long data)
+{
+	if (!test_bit(BT_SLEEPENABLE, &flags))
+		return;
+	BT_DBG("Tx timer expired");
+	printk(KERN_INFO "Tx timer expired\n");
+
+	set_bit(BT_TXEXPIRED, &flags);
+}
+
+static irqreturn_t bluesleep_hostwake_isr(int irq, void *dev_id)
+{
+	/* schedule a tasklet to handle the change in the host wake line */
+	tasklet_schedule(&hostwake_task);
+	return IRQ_HANDLED;
+}
+
 static struct hci_uart_proto athp = {
 	.id = HCI_UART_ATH3K,
 	.open = ath_open,
@@ -222,19 +299,159 @@
 	.flush = ath_flush,
 };
 
+static int __init bluesleep_probe(struct platform_device *pdev)
+{
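+	/*
+	 * Claim the host_wake/ext_wake gpios and the host_wake irq from
+	 * the platform resources, then set up the tx idle timer, the
+	 * hostwake tasklet and the wakeup interrupt.
+	 */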
+	int ret;
+	struct resource *res;
+
+	bsi = kzalloc(sizeof(struct bluesleep_info), GFP_KERNEL);
+	if (!bsi) {
+		ret = -ENOMEM;
+		goto failed;
+	}
+
+	res = platform_get_resource_byname(pdev, IORESOURCE_IO,
+						"gpio_host_wake");
+	if (!res) {
+		BT_ERR("couldn't find host_wake gpio\n");
+		ret = -ENODEV;
+		goto free_bsi;
+	}
+	bsi->host_wake = res->start;
+
+	ret = gpio_request(bsi->host_wake, "bt_host_wake");
+	if (ret)
+		goto free_bsi;
+
+	/* configure host_wake as input */
+	ret = gpio_direction_input(bsi->host_wake);
+	if (ret < 0) {
+		pr_err("%s: gpio_direction_input failed for GPIO %d, error %d\n",
+			__func__, bsi->host_wake, ret);
+		gpio_free(bsi->host_wake);
+		goto free_bsi;
+	}
+
+	res = platform_get_resource_byname(pdev, IORESOURCE_IO,
+						"gpio_ext_wake");
+	if (!res) {
+		BT_ERR("couldn't find ext_wake gpio\n");
+		ret = -ENODEV;
+		goto free_bt_host_wake;
+	}
+	bsi->ext_wake = res->start;
+
+	ret = gpio_request(bsi->ext_wake, "bt_ext_wake");
+	if (ret)
+		goto free_bt_host_wake;
+
+	/* configure ext_wake as output mode*/
+	ret = gpio_direction_output(bsi->ext_wake, 1);
+	if (ret < 0) {
+		pr_err("%s: gpio_direction_output failed for GPIO %d, error %d\n",
+			__func__, bsi->ext_wake, ret);
+		gpio_free(bsi->ext_wake);
+		goto free_bt_host_wake;
+	}
+	gpio_set_value(bsi->ext_wake, 0);
+
+	bsi->host_wake_irq = platform_get_irq_byname(pdev, "host_wake");
+	if (bsi->host_wake_irq < 0) {
+		BT_ERR("couldn't find host_wake irq\n");
+		ret = -ENODEV;
+		goto free_bt_ext_wake;
+	}
+
+	bsi->irq_polarity = POLARITY_LOW;	/* low edge (falling edge) */
+
+	/* Initialize spinlock. */
+	spin_lock_init(&rw_lock);
+
+	/* Initialize timer */
+	init_timer(&tx_timer);
+	tx_timer.function = bluesleep_tx_timer_expire;
+	tx_timer.data = 0;
+
+	/* initialize host wake tasklet */
+	tasklet_init(&hostwake_task, hostwake_interrupt, 0);
+
+	if (bsi->irq_polarity == POLARITY_LOW) {
+		ret = request_irq(bsi->host_wake_irq, bluesleep_hostwake_isr,
+				IRQF_DISABLED | IRQF_TRIGGER_FALLING,
+				"bluetooth hostwake", NULL);
+	} else  {
+		ret = request_irq(bsi->host_wake_irq, bluesleep_hostwake_isr,
+				IRQF_DISABLED | IRQF_TRIGGER_RISING,
+				"bluetooth hostwake", NULL);
+	}
+	if (ret  < 0) {
+		BT_ERR("Couldn't acquire BT_HOST_WAKE IRQ");
+		goto free_bt_timer;
+	}
+
+	ret = enable_irq_wake(bsi->host_wake_irq);
+	if (ret < 0) {
+		BT_ERR("Couldn't enable BT_HOST_WAKE as wakeup interrupt");
+		free_irq(bsi->host_wake_irq, NULL);
+		goto free_bt_timer;
+	}
+
+	return 0;
+
+free_bt_timer:
+	del_timer(&tx_timer);
+free_bt_ext_wake:
+	gpio_free(bsi->ext_wake);
+free_bt_host_wake:
+	gpio_free(bsi->host_wake);
+free_bsi:
+	kfree(bsi);
+failed:
+	return ret;
+}
+
+static int bluesleep_remove(struct platform_device *pdev)
+{
+	/* assert bt wake */
+	gpio_set_value(bsi->ext_wake, 0);
+	if (disable_irq_wake(bsi->host_wake_irq))
+		BT_ERR("Couldn't disable hostwake IRQ wakeup mode\n");
+	free_irq(bsi->host_wake_irq, NULL);
+	del_timer_sync(&tx_timer);
+	gpio_free(bsi->host_wake);
+	gpio_free(bsi->ext_wake);
+	kfree(bsi);
+	return 0;
+}
+
+static struct platform_driver bluesleep_driver = {
+	.remove = bluesleep_remove,
+	.driver = {
+		.name = "bluesleep",
+		.owner = THIS_MODULE,
+	},
+};
+
 int __init ath_init(void)
 {
-	int err = hci_uart_register_proto(&athp);
+	int ret;
 
-	if (!err)
+	ret = hci_uart_register_proto(&athp);
+
+	if (!ret)
 		BT_INFO("HCIATH3K protocol initialized");
-	else
+	else {
 		BT_ERR("HCIATH3K protocol registration failed");
-
-	return err;
+		return ret;
+	}
+	ret = platform_driver_probe(&bluesleep_driver, bluesleep_probe);
+	if (ret)
+		return ret;
+	return 0;
 }
 
 int __exit ath_deinit(void)
 {
+	platform_driver_unregister(&bluesleep_driver);
 	return hci_uart_unregister_proto(&athp);
 }
diff --git a/drivers/char/diag/diagchar.h b/drivers/char/diag/diagchar.h
index 95a85f2a..0febaf3 100644
--- a/drivers/char/diag/diagchar.h
+++ b/drivers/char/diag/diagchar.h
@@ -251,6 +251,7 @@
 	int logging_mode;
 	int mask_check;
 	int logging_process_id;
+	struct task_struct *socket_process;
 #ifdef CONFIG_DIAG_SDIO_PIPE
 	unsigned char *buf_in_sdio;
 	unsigned char *usb_buf_mdm_out;
diff --git a/drivers/char/diag/diagchar_core.c b/drivers/char/diag/diagchar_core.c
index 240a514..30504bc 100644
--- a/drivers/char/diag/diagchar_core.c
+++ b/drivers/char/diag/diagchar_core.c
@@ -230,6 +230,13 @@
 	* This call will remove any pending registrations of such client
 	*/
 	diagchar_ioctl(NULL, DIAG_IOCTL_DCI_DEINIT, 0);
+
+	/* If the exiting process is the socket process, clear the reference */
+	if (driver->socket_process &&
+		(driver->socket_process->tgid == current->tgid)) {
+		driver->socket_process = NULL;
+	}
+
 #ifdef CONFIG_DIAG_OVER_USB
 	/* If the SD logging process exits, change logging to USB mode */
 	if (driver->logging_process_id == current->tgid) {
@@ -344,6 +351,7 @@
 	void *temp_buf;
 	uint16_t support_list = 0;
 	struct dci_notification_tbl *notify_params;
+	int status;
 
 	if (iocmd == DIAG_IOCTL_COMMAND_REG) {
 		struct bindpkt_params_per_process *pkt_params =
@@ -480,12 +488,32 @@
 		mutex_lock(&driver->diagchar_mutex);
 		temp = driver->logging_mode;
 		driver->logging_mode = (int)ioarg;
-		if (driver->logging_mode == MEMORY_DEVICE_MODE)
+		if (driver->logging_mode == MEMORY_DEVICE_MODE) {
 			driver->mask_check = 1;
+			if (driver->socket_process) {
+				/*
+				 * Notify the socket logging process that we
+				 * are switching to MEMORY_DEVICE_MODE
+				 */
+				status = send_sig(SIGCONT,
+					 driver->socket_process, 0);
+				if (status) {
+					pr_err("diag: %s, Error notifying ",
+						__func__);
+					pr_err("socket process, status: %d\n",
+						status);
+				}
+			}
+		}
 		if (driver->logging_mode == UART_MODE) {
 			driver->mask_check = 0;
 			driver->logging_mode = MEMORY_DEVICE_MODE;
 		}
+		if (driver->logging_mode == SOCKET_MODE) {
+			driver->socket_process = current;
+			driver->mask_check = 0;
+			driver->logging_mode = MEMORY_DEVICE_MODE;
+		}
 		driver->logging_process_id = current->tgid;
 		mutex_unlock(&driver->diagchar_mutex);
 		if (temp == MEMORY_DEVICE_MODE && driver->logging_mode
@@ -1251,6 +1279,7 @@
 		driver->poolsize_write_struct = poolsize_write_struct;
 		driver->num_clients = max_clients;
 		driver->logging_mode = USB_MODE;
+		driver->socket_process = NULL;
 		driver->mask_check = 0;
 		mutex_init(&driver->diagchar_mutex);
 		init_waitqueue_head(&driver->wait_q);
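
The send_sig(SIGCONT, ...) call added above notifies the socket-logging client that logging has switched to MEMORY_DEVICE_MODE. That client lives in userspace and is not part of this patch; the snippet below is a purely hypothetical sketch of how such a process might catch the notification.

/*
 * Hypothetical userspace sketch (not part of the diag driver): the
 * socket-logging process installs a SIGCONT handler so it notices the
 * mode switch signalled by diagchar_ioctl().
 */
#include <signal.h>

static volatile sig_atomic_t logging_mode_changed;

static void on_sigcont(int sig)
{
	(void)sig;
	logging_mode_changed = 1;	/* re-query the logging mode */
}

static void install_mode_switch_handler(void)
{
	struct sigaction sa = { .sa_handler = on_sigcont };

	sigemptyset(&sa.sa_mask);
	sigaction(SIGCONT, &sa, NULL);
}
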
diff --git a/drivers/gpu/ion/ion.c b/drivers/gpu/ion/ion.c
index 741d4fa..4fe1f01 100644
--- a/drivers/gpu/ion/ion.c
+++ b/drivers/gpu/ion/ion.c
@@ -1306,10 +1306,9 @@
 		return PTR_ERR(dmabuf);
 	}
 	fd = dma_buf_fd(dmabuf, O_CLOEXEC);
-	if (fd < 0) {
+	if (fd < 0)
 		dma_buf_put(dmabuf);
-		ion_buffer_put(buffer);
-	}
+
 	return fd;
 }
 EXPORT_SYMBOL(ion_share_dma_buf);
diff --git a/drivers/gpu/ion/msm/msm_ion.c b/drivers/gpu/ion/msm/msm_ion.c
index eec3fe0..c65a000 100644
--- a/drivers/gpu/ion/msm/msm_ion.c
+++ b/drivers/gpu/ion/msm/msm_ion.c
@@ -18,15 +18,82 @@
 #include <linux/slab.h>
 #include <linux/memory_alloc.h>
 #include <linux/fmem.h>
+#include <linux/of.h>
 #include <mach/ion.h>
 #include <mach/msm_memtypes.h>
 #include "../ion_priv.h"
 #include "ion_cp_common.h"
 
+#define ION_COMPAT_STR	"qcom,msm-ion"
+#define ION_COMPAT_MEM_RESERVE_STR "qcom,msm-ion-reserve"
+
 static struct ion_device *idev;
 static int num_heaps;
 static struct ion_heap **heaps;
 
+struct ion_heap_desc {
+	unsigned int id;
+	enum ion_heap_type type;
+	const char *name;
+	unsigned int permission_type;
+};
+
+
+static struct ion_heap_desc ion_heap_meta[] = {
+	{
+		.id	= ION_SYSTEM_HEAP_ID,
+		.type	= ION_HEAP_TYPE_SYSTEM,
+		.name	= ION_VMALLOC_HEAP_NAME,
+	},
+	{
+		.id	= ION_CP_MM_HEAP_ID,
+		.type	= ION_HEAP_TYPE_CP,
+		.name	= ION_MM_HEAP_NAME,
+		.permission_type = IPT_TYPE_MM_CARVEOUT,
+	},
+	{
+		.id	= ION_MM_FIRMWARE_HEAP_ID,
+		.type	= ION_HEAP_TYPE_CARVEOUT,
+		.name	= ION_MM_FIRMWARE_HEAP_NAME,
+	},
+	{
+		.id	= ION_CP_MFC_HEAP_ID,
+		.type	= ION_HEAP_TYPE_CP,
+		.name	= ION_MFC_HEAP_NAME,
+		.permission_type = IPT_TYPE_MFC_SHAREDMEM,
+	},
+	{
+		.id	= ION_SF_HEAP_ID,
+		.type	= ION_HEAP_TYPE_CARVEOUT,
+		.name	= ION_SF_HEAP_NAME,
+	},
+	{
+		.id	= ION_IOMMU_HEAP_ID,
+		.type	= ION_HEAP_TYPE_IOMMU,
+		.name	= ION_IOMMU_HEAP_NAME,
+	},
+	{
+		.id	= ION_QSECOM_HEAP_ID,
+		.type	= ION_HEAP_TYPE_CARVEOUT,
+		.name	= ION_QSECOM_HEAP_NAME,
+	},
+	{
+		.id	= ION_AUDIO_HEAP_ID,
+		.type	= ION_HEAP_TYPE_CARVEOUT,
+		.name	= ION_AUDIO_HEAP_NAME,
+	},
+	{
+		.id	= ION_CP_WB_HEAP_ID,
+		.type	= ION_HEAP_TYPE_CP,
+		.name	= ION_WB_HEAP_NAME,
+	},
+	{
+		.id	= ION_CAMERA_HEAP_ID,
+		.type	= ION_HEAP_TYPE_CARVEOUT,
+		.name	= ION_CAMERA_HEAP_NAME,
+	},
+};
+
 struct ion_client *msm_ion_client_create(unsigned int heap_mask,
 					const char *name)
 {
@@ -269,11 +336,243 @@
 	}
 }
 
+static int msm_init_extra_data(struct ion_platform_heap *heap,
+			       const struct ion_heap_desc *heap_desc)
+{
+	int ret = 0;
+
+	switch (heap->type) {
+	case ION_HEAP_TYPE_CP:
+	{
+		heap->extra_data = kzalloc(sizeof(struct ion_cp_heap_pdata),
+					   GFP_KERNEL);
+		if (!heap->extra_data) {
+			ret = -ENOMEM;
+		} else {
+			struct ion_cp_heap_pdata *extra = heap->extra_data;
+			extra->permission_type = heap_desc->permission_type;
+		}
+		break;
+	}
+	case ION_HEAP_TYPE_CARVEOUT:
+	{
+		heap->extra_data = kzalloc(sizeof(struct ion_co_heap_pdata),
+					   GFP_KERNEL);
+		if (!heap->extra_data)
+			ret = -ENOMEM;
+		break;
+	}
+	default:
+		heap->extra_data = 0;
+		break;
+	}
+	return ret;
+}
+
+static int msm_ion_populate_heap(struct ion_platform_heap *heap)
+{
+	unsigned int i;
+	int ret = -EINVAL;
+	unsigned int len = ARRAY_SIZE(ion_heap_meta);
+	for (i = 0; i < len; ++i) {
+		if (ion_heap_meta[i].id == heap->id) {
+			heap->name = ion_heap_meta[i].name;
+			heap->type = ion_heap_meta[i].type;
+			ret = msm_init_extra_data(heap, &ion_heap_meta[i]);
+			break;
+		}
+	}
+	if (ret)
+		pr_err("%s: Unable to populate heap, error: %d", __func__, ret);
+	return ret;
+}
+
+static void free_pdata(const struct ion_platform_data *pdata)
+{
+	unsigned int i;
+	for (i = 0; i < pdata->nr; ++i)
+		kfree(pdata->heaps[i].extra_data);
+	kfree(pdata);
+}
+
+static int memtype_to_ion_memtype[] = {
+	[MEMTYPE_SMI_KERNEL] = ION_SMI_TYPE,
+	[MEMTYPE_SMI]	= ION_SMI_TYPE,
+	[MEMTYPE_EBI0] = ION_EBI_TYPE,
+	[MEMTYPE_EBI1] = ION_EBI_TYPE,
+};
+
+static void msm_ion_get_heap_align(struct device_node *node,
+				   struct ion_platform_heap *heap)
+{
+	unsigned int val;
+
+	int ret = of_property_read_u32(node, "qcom,heap-align", &val);
+	if (!ret) {
+		switch (heap->type) {
+		case ION_HEAP_TYPE_CP:
+		{
+			struct ion_cp_heap_pdata *extra =
+						heap->extra_data;
+			extra->align = val;
+			break;
+		}
+		case ION_HEAP_TYPE_CARVEOUT:
+		{
+			struct ion_co_heap_pdata *extra =
+						heap->extra_data;
+			extra->align = val;
+			break;
+		}
+		default:
+			pr_err("ION-heap %s: Cannot specify alignment for this type of heap\n",
+					heap->name);
+			break;
+		}
+	}
+}
+
+static int msm_ion_get_heap_size(struct device_node *node,
+				 struct ion_platform_heap *heap)
+{
+	unsigned int val;
+	int ret = 0;
+	const char *memory_name_prop;
+
+	ret = of_property_read_u32(node, "qcom,memory-reservation-size", &val);
+	if (!ret) {
+		heap->size = val;
+		ret = of_property_read_string(node,
+					      "qcom,memory-reservation-type",
+					      &memory_name_prop);
+
+		if (!ret && memory_name_prop) {
+			int mem_type =
+				msm_get_memory_type_from_name(memory_name_prop);
+
+			if (mem_type < 0) {
+				ret = -EINVAL;
+				goto out;
+			}
+			heap->memory_type = memtype_to_ion_memtype[mem_type];
+		}
+		if (heap->size && (ret || !memory_name_prop)) {
+			pr_err("%s: Need to specify reservation type\n",
+				__func__);
+			ret = -EINVAL;
+		}
+	} else {
+		ret = 0;
+	}
+out:
+	return ret;
+}
+
+
+static void msm_ion_get_heap_adjacent(struct device_node *node,
+				      struct ion_platform_heap *heap)
+{
+	unsigned int val;
+	int ret = of_property_read_u32(node, "qcom,heap-adjacent", &val);
+	if (!ret) {
+		switch (heap->type) {
+		case ION_HEAP_TYPE_CARVEOUT:
+		{
+			struct ion_co_heap_pdata *extra = heap->extra_data;
+			extra->adjacent_mem_id = val;
+			break;
+		}
+		default:
+			pr_err("ION-heap %s: Cannot specify adjacent mem id for this type of heap\n",
+				heap->name);
+			break;
+		}
+	} else {
+		switch (heap->type) {
+		case ION_HEAP_TYPE_CARVEOUT:
+		{
+			struct ion_co_heap_pdata *extra = heap->extra_data;
+			extra->adjacent_mem_id = INVALID_HEAP_ID;
+			break;
+		}
+		default:
+			break;
+		}
+	}
+}
+
+static struct ion_platform_data *msm_ion_parse_dt(
+					const struct device_node *dt_node)
+{
+	struct ion_platform_data *pdata = 0;
+	struct device_node *node;
+	uint32_t val = 0;
+	int ret = 0;
+	uint32_t num_heaps = 0;
+	int idx = 0;
+
+	for_each_child_of_node(dt_node, node)
+		num_heaps++;
+
+	if (!num_heaps)
+		return ERR_PTR(-EINVAL);
+
+	pdata = kzalloc(sizeof(struct ion_platform_data) +
+			num_heaps*sizeof(struct ion_platform_heap), GFP_KERNEL);
+	if (!pdata)
+		return ERR_PTR(-ENOMEM);
+
+	pdata->nr = num_heaps;
+
+	for_each_child_of_node(dt_node, node) {
+		/*
+		 * TODO: Replace this with of_get_address() when this patch
+		 * gets merged:
+		 * http://permalink.gmane.org/gmane.linux.drivers.devicetree/18614
+		 */
+		ret = of_property_read_u32(node, "reg", &val);
+		if (ret) {
+			pr_err("%s: Unable to find reg key", __func__);
+			goto free_heaps;
+		}
+		pdata->heaps[idx].id = val;
+
+		ret = msm_ion_populate_heap(&pdata->heaps[idx]);
+		if (ret)
+			goto free_heaps;
+
+		msm_ion_get_heap_align(node, &pdata->heaps[idx]);
+
+		ret = msm_ion_get_heap_size(node, &pdata->heaps[idx]);
+		if (ret)
+			goto free_heaps;
+
+		msm_ion_get_heap_adjacent(node, &pdata->heaps[idx]);
+
+		++idx;
+	}
+	return pdata;
+
+free_heaps:
+	free_pdata(pdata);
+	return ERR_PTR(ret);
+}
+
 static int msm_ion_probe(struct platform_device *pdev)
 {
-	struct ion_platform_data *pdata = pdev->dev.platform_data;
-	int err;
+	struct ion_platform_data *pdata;
+	unsigned int pdata_needs_to_be_freed;
+	int err = -1;
 	int i;
+	if (pdev->dev.of_node) {
+		pdata = msm_ion_parse_dt(pdev->dev.of_node);
+		if (IS_ERR(pdata)) {
+			err = PTR_ERR(pdata);
+			goto out;
+		}
+		pdata_needs_to_be_freed = 1;
+	} else {
+		pdata = pdev->dev.platform_data;
+		pdata_needs_to_be_freed = 0;
+	}
 
 	num_heaps = pdata->nr;
 
@@ -315,6 +614,8 @@
 
 		ion_device_add_heap(idev, heaps[i]);
 	}
+	if (pdata_needs_to_be_freed)
+		free_pdata(pdata);
 
 	check_for_heap_overlap(pdata->heaps, num_heaps);
 	platform_set_drvdata(pdev, idev);
@@ -322,6 +623,8 @@
 
 freeheaps:
 	kfree(heaps);
+	if (pdata_needs_to_be_freed)
+		free_pdata(pdata);
 out:
 	return err;
 }
@@ -339,10 +642,19 @@
 	return 0;
 }
 
+static struct of_device_id msm_ion_match_table[] = {
+	{.compatible = ION_COMPAT_STR},
+	{},
+};
+EXPORT_COMPAT(ION_COMPAT_MEM_RESERVE_STR);
+
 static struct platform_driver msm_ion_driver = {
 	.probe = msm_ion_probe,
 	.remove = msm_ion_remove,
-	.driver = { .name = "ion-msm" }
+	.driver = {
+		.name = "ion-msm",
+		.of_match_table = msm_ion_match_table,
+	},
 };
 
 static int __init msm_ion_init(void)
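
msm_ion_probe() now takes its heap description either from the device tree (via msm_ion_parse_dt()) or from legacy board-file platform data, and frees only what it parsed itself. The fragment below is a minimal sketch of that ownership rule using only the helpers added above; example_probe() is a placeholder name, not driver code.

static int example_probe(struct platform_device *pdev)
{
	struct ion_platform_data *pdata;
	bool parsed_from_dt = false;

	if (pdev->dev.of_node) {
		pdata = msm_ion_parse_dt(pdev->dev.of_node);
		if (IS_ERR(pdata))
			return PTR_ERR(pdata);
		parsed_from_dt = true;
	} else {
		pdata = pdev->dev.platform_data;
	}

	/* ... create and register the heaps described by pdata ... */

	/* DT-parsed pdata is owned by the driver; board data is not */
	if (parsed_from_dt)
		free_pdata(pdata);
	return 0;
}
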
diff --git a/drivers/gpu/msm/a3xx_reg.h b/drivers/gpu/msm/a3xx_reg.h
index 8ec9431..33fcbfd 100644
--- a/drivers/gpu/msm/a3xx_reg.h
+++ b/drivers/gpu/msm/a3xx_reg.h
@@ -248,6 +248,8 @@
 #define A3XX_VBIF_OUT_WR_LIM_CONF0 0x3035
 #define A3XX_VBIF_DDR_OUT_MAX_BURST 0x3036
 #define A3XX_VBIF_ARB_CTL 0x303C
+#define A3XX_VBIF_ROUND_ROBIN_QOS_ARB 0x3049
+#define A3XX_VBIF_OUT_AXI_AMEMTYPE_CONF0 0x3058
 #define A3XX_VBIF_OUT_AXI_AOOO_EN 0x305E
 #define A3XX_VBIF_OUT_AXI_AOOO 0x305F
 
diff --git a/drivers/gpu/msm/adreno.c b/drivers/gpu/msm/adreno.c
index fc51970..f7d1e59 100644
--- a/drivers/gpu/msm/adreno.c
+++ b/drivers/gpu/msm/adreno.c
@@ -15,8 +15,14 @@
 #include <linux/vmalloc.h>
 #include <linux/ioctl.h>
 #include <linux/sched.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
 
 #include <mach/socinfo.h>
+#include <mach/msm_bus_board.h>
+#include <mach/msm_bus.h>
+#include <mach/msm_dcvs.h>
+#include <mach/msm_dcvs_scm.h>
 
 #include "kgsl.h"
 #include "kgsl_pwrscale.h"
@@ -178,7 +184,9 @@
 	{ ADRENO_REV_A320, 3, 2, 0, ANY_ID,
 		"a300_pm4.fw", "a300_pfp.fw", &adreno_a3xx_gpudev,
 		512, 0, 2, SZ_512K },
-
+	{ ADRENO_REV_A330, 3, 3, 0, 0,
+		"a330_pm4.fw", "a330_pfp.fw", &adreno_a3xx_gpudev,
+		512, 0, 2, SZ_1M },
 };
 
 static irqreturn_t adreno_irq_handler(struct kgsl_device *device)
@@ -647,12 +655,520 @@
 	adreno_dev->gmem_size = adreno_gpulist[i].gmem_size;
 }
 
+static struct platform_device_id adreno_id_table[] = {
+	{ DEVICE_3D0_NAME, (kernel_ulong_t)&device_3d0.dev, },
+	{},
+};
+
+MODULE_DEVICE_TABLE(platform, adreno_id_table);
+
+static struct of_device_id adreno_match_table[] = {
+	{ .compatible = "qcom,kgsl-3d0", },
+	{}
+};
+
+static inline int adreno_of_read_property(struct device_node *node,
+	const char *prop, unsigned int *ptr)
+{
+	int ret = of_property_read_u32(node, prop, ptr);
+	if (ret)
+		KGSL_CORE_ERR("Unable to read '%s'\n", prop);
+	return ret;
+}
+
+static struct device_node *adreno_of_find_subnode(struct device_node *parent,
+	const char *name)
+{
+	struct device_node *child;
+
+	for_each_child_of_node(parent, child) {
+		if (of_device_is_compatible(child, name))
+			return child;
+	}
+
+	return NULL;
+}
+
+static int adreno_of_get_pwrlevels(struct device_node *parent,
+	struct kgsl_device_platform_data *pdata)
+{
+	struct device_node *node, *child;
+	int ret = -EINVAL;
+
+	node = adreno_of_find_subnode(parent, "qcom,gpu-pwrlevels");
+
+	if (node == NULL) {
+		KGSL_CORE_ERR("Unable to find 'qcom,gpu-pwrlevels'\n");
+		return -EINVAL;
+	}
+
+	pdata->num_levels = 0;
+
+	for_each_child_of_node(node, child) {
+		unsigned int index;
+		struct kgsl_pwrlevel *level;
+
+		if (adreno_of_read_property(child, "reg", &index))
+			goto done;
+
+		if (index >= KGSL_MAX_PWRLEVELS) {
+			KGSL_CORE_ERR("Pwrlevel index %d is out of range\n",
+				index);
+			continue;
+		}
+
+		if (index >= pdata->num_levels)
+			pdata->num_levels = index + 1;
+
+		level = &pdata->pwrlevel[index];
+
+		if (adreno_of_read_property(child, "qcom,gpu-freq",
+			&level->gpu_freq))
+			goto done;
+
+		if (adreno_of_read_property(child, "qcom,bus-freq",
+			&level->bus_freq))
+			goto done;
+
+		if (adreno_of_read_property(child, "qcom,io-fraction",
+			&level->io_fraction))
+			level->io_fraction = 0;
+	}
+
+	if (adreno_of_read_property(parent, "qcom,initial-pwrlevel",
+		&pdata->init_level))
+		pdata->init_level = 1;
+
+	if (pdata->init_level < 0 || pdata->init_level > pdata->num_levels) {
+		KGSL_CORE_ERR("Initial power level out of range\n");
+		pdata->init_level = 1;
+	}
+
+	ret = 0;
+done:
+	return ret;
+
+}
+static void adreno_of_free_bus_scale_info(struct msm_bus_scale_pdata *pdata)
+{
+	int i;
+
+	if (pdata == NULL)
+		return;
+
+	for (i = 0;  pdata->usecase && i < pdata->num_usecases; i++)
+		kfree(pdata->usecase[i].vectors);
+
+	kfree(pdata->usecase);
+	kfree(pdata);
+}
+
+struct msm_bus_scale_pdata *adreno_of_get_bus_scale(struct device_node *node)
+{
+	static int bus_vectors_src[3] = {MSM_BUS_MASTER_GRAPHICS_3D,
+		MSM_BUS_MASTER_GRAPHICS_3D_PORT1, MSM_BUS_MASTER_V_OCMEM_GFX3D};
+	static int bus_vectors_dst[2] = {MSM_BUS_SLAVE_EBI_CH0,
+		MSM_BUS_SLAVE_OCMEM};
+	const unsigned int *vectors;
+	struct msm_bus_scale_pdata *pdata;
+	int i, j, len, num_paths;
+	int ret = -EINVAL;
+
+	pdata = kzalloc(sizeof(*pdata), GFP_KERNEL);
+
+	if (!pdata) {
+		KGSL_CORE_ERR("kzalloc(%d) failed\n", sizeof(*pdata));
+		return ERR_PTR(-ENOMEM);
+	}
+
+	if (adreno_of_read_property(node, "qcom,grp3d-num-bus-scale-usecases",
+		&pdata->num_usecases)) {
+		pdata->num_usecases = 0;
+		goto err;
+	}
+
+	pdata->usecase =  kzalloc(pdata->num_usecases *
+		sizeof(struct msm_bus_paths), GFP_KERNEL);
+
+	if (pdata->usecase == NULL) {
+		KGSL_CORE_ERR("kzalloc (%d) failed\n",
+			pdata->num_usecases * sizeof(struct msm_bus_paths));
+		ret = -ENOMEM;
+		goto err;
+	}
+
+	if (adreno_of_read_property(node, "qcom,grp3d-num-vectors-per-usecase",
+		&num_paths))
+		goto err;
+
+	vectors = of_get_property(node, "qcom,grp3d-vectors", &len);
+
+	if (len != pdata->num_usecases * num_paths *
+		sizeof(struct msm_bus_vectors)) {
+		KGSL_CORE_ERR("Invalid size for the bus scale vectors\n");
+		goto err;
+	}
+
+	for (i = 0; i < pdata->num_usecases; i++) {
+		pdata->usecase[i].num_paths = num_paths;
+		pdata->usecase[i].vectors = kzalloc(num_paths *
+						sizeof(struct msm_bus_vectors),
+						GFP_KERNEL);
+		if (!pdata->usecase[i].vectors) {
+			KGSL_CORE_ERR("kzalloc(%d) failed\n",
+				num_paths * sizeof(struct msm_bus_vectors));
+			ret = -ENOMEM;
+			goto err;
+		}
+		for (j = 0; j < num_paths; j++) {
+			int index = (i * num_paths + j) * 4;
+			pdata->usecase[i].vectors[j].src =
+				bus_vectors_src[be32_to_cpu(vectors[index])];
+			pdata->usecase[i].vectors[j].dst =
+				bus_vectors_dst[
+					be32_to_cpu(vectors[index + 1])];
+			pdata->usecase[i].vectors[j].ab =
+				be32_to_cpu(vectors[index + 2]);
+			pdata->usecase[i].vectors[j].ib =
+				KGSL_CONVERT_TO_MBPS(
+					be32_to_cpu(vectors[index + 3]));
+		}
+	}
+
+	pdata->name = "grp3d";
+
+	return pdata;
+
+err:
+	adreno_of_free_bus_scale_info(pdata);
+
+	return ERR_PTR(ret);
+}
+
+static struct msm_dcvs_core_info *adreno_of_get_dcvs(struct device_node *parent)
+{
+	struct device_node *node, *child;
+	struct msm_dcvs_core_info *info = NULL;
+	int count = 0;
+	int ret = -EINVAL;
+
+	node = adreno_of_find_subnode(parent, "qcom,dcvs-core-info");
+	if (node == NULL)
+		return ERR_PTR(-EINVAL);
+
+	info = kzalloc(sizeof(*info), GFP_KERNEL);
+
+	if (info == NULL) {
+		KGSL_CORE_ERR("kzalloc(%d) failed\n", sizeof(*info));
+		ret = -ENOMEM;
+		goto err;
+	}
+
+	for_each_child_of_node(node, child)
+		count++;
+
+	info->core_param.num_freq = count;
+
+	info->freq_tbl = kzalloc(info->core_param.num_freq *
+			sizeof(struct msm_dcvs_freq_entry),
+			GFP_KERNEL);
+
+	if (info->freq_tbl == NULL) {
+		KGSL_CORE_ERR("kzalloc(%d) failed\n",
+			info->core_param.num_freq *
+			sizeof(struct msm_dcvs_freq_entry));
+		ret = -ENOMEM;
+		goto err;
+	}
+
+	for_each_child_of_node(node, child) {
+		unsigned int index;
+
+		if (adreno_of_read_property(child, "reg", &index))
+			goto err;
+
+		if (index >= info->core_param.num_freq) {
+			KGSL_CORE_ERR("DCVS freq entry %d is out of range\n",
+				index);
+			continue;
+		}
+
+		if (adreno_of_read_property(child, "qcom,freq",
+			&info->freq_tbl[index].freq))
+			goto err;
+
+		if (adreno_of_read_property(child, "qcom,idle-energy",
+			&info->freq_tbl[index].idle_energy))
+			info->freq_tbl[index].idle_energy = 0;
+
+		if (adreno_of_read_property(child, "qcom,active-energy",
+			&info->freq_tbl[index].active_energy))
+			info->freq_tbl[index].active_energy = 0;
+	}
+
+	if (adreno_of_read_property(node, "qcom,core-max-time-us",
+		&info->core_param.max_time_us))
+		goto err;
+
+	if (adreno_of_read_property(node, "qcom,algo-slack-time-us",
+		&info->algo_param.slack_time_us))
+		goto err;
+
+	if (adreno_of_read_property(node, "qcom,algo-disable-pc-threshold",
+		&info->algo_param.disable_pc_threshold))
+		goto err;
+
+	if (adreno_of_read_property(node, "qcom,algo-ss-window-size",
+		&info->algo_param.ss_window_size))
+		goto err;
+
+	if (adreno_of_read_property(node, "qcom,algo-ss-util-pct",
+		&info->algo_param.ss_util_pct))
+		goto err;
+
+	if (adreno_of_read_property(node, "qcom,algo-em-max-util-pct",
+		&info->algo_param.em_max_util_pct))
+		goto err;
+
+	if (adreno_of_read_property(node, "qcom,algo-ss-iobusy-conv",
+		&info->algo_param.ss_iobusy_conv))
+		goto err;
+
+	return info;
+
+err:
+	if (info)
+		kfree(info->freq_tbl);
+
+	kfree(info);
+
+	return ERR_PTR(ret);
+}
+
+static int adreno_of_get_iommu(struct device_node *parent,
+	struct kgsl_device_platform_data *pdata)
+{
+	struct device_node *node, *child;
+	struct kgsl_device_iommu_data *data = NULL;
+	struct kgsl_iommu_ctx *ctxs = NULL;
+	u32 reg_val[2];
+	int ctx_index = 0;
+
+	node = of_parse_phandle(parent, "iommu", 0);
+	if (node == NULL)
+		return -EINVAL;
+
+	data = kzalloc(sizeof(*data), GFP_KERNEL);
+	if (data == NULL) {
+		KGSL_CORE_ERR("kzalloc(%d) failed\n", sizeof(*data));
+		goto err;
+	}
+
+	if (of_property_read_u32_array(node, "reg", reg_val, 2))
+		goto err;
+
+	data->physstart = reg_val[0];
+	data->physend = data->physstart + reg_val[1] - 1;
+
+	data->iommu_ctx_count = 0;
+
+	for_each_child_of_node(node, child)
+		data->iommu_ctx_count++;
+
+	ctxs = kzalloc(data->iommu_ctx_count * sizeof(struct kgsl_iommu_ctx),
+		GFP_KERNEL);
+
+	if (ctxs == NULL) {
+		KGSL_CORE_ERR("kzalloc(%d) failed\n",
+			data->iommu_ctx_count * sizeof(struct kgsl_iommu_ctx));
+		goto err;
+	}
+
+	for_each_child_of_node(node, child) {
+		int ret = of_property_read_string(child, "label",
+				&ctxs[ctx_index].iommu_ctx_name);
+
+		if (ret) {
+			KGSL_CORE_ERR("Unable to read KGSL IOMMU 'label'\n");
+			goto err;
+		}
+
+		if (adreno_of_read_property(child, "qcom,iommu-ctx-sids",
+			&ctxs[ctx_index].ctx_id))
+			goto err;
+
+		ctx_index++;
+	}
+
+	data->iommu_ctxs = ctxs;
+
+	pdata->iommu_data = data;
+	pdata->iommu_count = 1;
+
+	return 0;
+
+err:
+	kfree(ctxs);
+	kfree(data);
+
+	return -EINVAL;
+}
+
+static int adreno_of_get_pdata(struct platform_device *pdev)
+{
+	struct kgsl_device_platform_data *pdata = NULL;
+	struct kgsl_device *device;
+	int ret = -EINVAL;
+
+	pdev->id_entry = adreno_id_table;
+
+	pdata = pdev->dev.platform_data;
+	if (pdata)
+		return 0;
+
+	if (of_property_read_string(pdev->dev.of_node, "label", &pdev->name)) {
+		KGSL_CORE_ERR("Unable to read 'label'\n");
+		goto err;
+	}
+
+	if (adreno_of_read_property(pdev->dev.of_node, "qcom,id", &pdev->id))
+		goto err;
+
+	pdata = kzalloc(sizeof(*pdata), GFP_KERNEL);
+	if (pdata == NULL) {
+		KGSL_CORE_ERR("kzalloc(%d) failed\n", sizeof(*pdata));
+		ret = -ENOMEM;
+		goto err;
+	}
+
+	if (adreno_of_read_property(pdev->dev.of_node, "qcom,chipid",
+		&pdata->chipid))
+		goto err;
+
+	/* pwrlevel Data */
+	ret = adreno_of_get_pwrlevels(pdev->dev.of_node, pdata);
+	if (ret)
+		goto err;
+
+	/* Default value is 83 if not found in DT */
+	if (adreno_of_read_property(pdev->dev.of_node, "qcom,idle-timeout",
+		&pdata->idle_timeout))
+		pdata->idle_timeout = 83;
+
+	if (adreno_of_read_property(pdev->dev.of_node, "qcom,nap-allowed",
+		&pdata->nap_allowed))
+		pdata->nap_allowed = 1;
+
+	if (adreno_of_read_property(pdev->dev.of_node, "qcom,clk-map",
+		&pdata->clk_map))
+		goto err;
+
+	device = (struct kgsl_device *)pdev->id_entry->driver_data;
+
+	if (device->id != KGSL_DEVICE_3D0)
+		goto err;
+
+	/* Bus Scale Data */
+
+	pdata->bus_scale_table = adreno_of_get_bus_scale(pdev->dev.of_node);
+	if (IS_ERR_OR_NULL(pdata->bus_scale_table)) {
+		ret = PTR_ERR(pdata->bus_scale_table);
+		goto err;
+	}
+
+	pdata->core_info = adreno_of_get_dcvs(pdev->dev.of_node);
+	if (IS_ERR_OR_NULL(pdata->core_info)) {
+		ret = PTR_ERR(pdata->core_info);
+		goto err;
+	}
+
+	ret = adreno_of_get_iommu(pdev->dev.of_node, pdata);
+	if (ret)
+		goto err;
+
+	pdev->dev.platform_data = pdata;
+	return 0;
+
+err:
+	if (pdata) {
+		adreno_of_free_bus_scale_info(pdata->bus_scale_table);
+		if (pdata->core_info)
+			kfree(pdata->core_info->freq_tbl);
+		kfree(pdata->core_info);
+
+		if (pdata->iommu_data)
+			kfree(pdata->iommu_data->iommu_ctxs);
+
+		kfree(pdata->iommu_data);
+	}
+
+	kfree(pdata);
+
+	return ret;
+}
+
+#ifdef CONFIG_MSM_OCMEM
+static int
+adreno_ocmem_gmem_malloc(struct adreno_device *adreno_dev)
+{
+	if (adreno_dev->gpurev != ADRENO_REV_A330)
+		return 0;
+
+	/* OCMEM is only needed once; consecutive allocations are not supported */
+	if (adreno_dev->ocmem_hdl != NULL)
+		return 0;
+
+	adreno_dev->ocmem_hdl =
+		ocmem_allocate(OCMEM_GRAPHICS, adreno_dev->gmem_size);
+	if (adreno_dev->ocmem_hdl == NULL)
+		return -ENOMEM;
+
+	adreno_dev->gmem_size = adreno_dev->ocmem_hdl->len;
+	adreno_dev->gmem_base = adreno_dev->ocmem_hdl->addr;
+
+	return 0;
+}
+
+static void
+adreno_ocmem_gmem_free(struct adreno_device *adreno_dev)
+{
+	if (adreno_dev->gpurev != ADRENO_REV_A330)
+		return;
+
+	if (adreno_dev->ocmem_hdl == NULL)
+		return;
+
+	ocmem_free(OCMEM_GRAPHICS, adreno_dev->ocmem_hdl);
+	adreno_dev->ocmem_hdl = NULL;
+}
+#else
+static int
+adreno_ocmem_gmem_malloc(struct adreno_device *adreno_dev)
+{
+	return 0;
+}
+
+static void
+adreno_ocmem_gmem_free(struct adreno_device *adreno_dev)
+{
+}
+#endif
+
 static int __devinit
 adreno_probe(struct platform_device *pdev)
 {
 	struct kgsl_device *device;
 	struct adreno_device *adreno_dev;
 	int status = -EINVAL;
+	bool is_dt;
+
+	is_dt = of_match_device(adreno_match_table, &pdev->dev);
+
+	if (is_dt && pdev->dev.of_node) {
+		status = adreno_of_get_pdata(pdev);
+		if (status)
+			goto error_return;
+	}
 
 	device = (struct kgsl_device *)pdev->id_entry->driver_data;
 	adreno_dev = ADRENO_DEVICE(device);
@@ -678,6 +1194,7 @@
 	adreno_ringbuffer_close(&adreno_dev->ringbuffer);
 error:
 	device->parentdev = NULL;
+error_return:
 	return status;
 }
 
@@ -740,6 +1257,12 @@
 	if (status)
 		goto error_clk_off;
 
+	status = adreno_ocmem_gmem_malloc(adreno_dev);
+	if (status) {
+		KGSL_DRV_ERR(device, "OCMEM malloc failed\n");
+		goto error_mmu_off;
+	}
+
 	/* Start the GPU */
 	adreno_dev->gpudev->start(adreno_dev);
 
@@ -756,7 +1279,10 @@
 	}
 
 	kgsl_pwrctrl_irq(device, KGSL_PWRFLAGS_OFF);
+
+error_mmu_off:
 	kgsl_mmu_stop(&device->mmu);
+
 error_clk_off:
 	kgsl_pwrctrl_disable(device);
 
@@ -777,6 +1303,8 @@
 	kgsl_pwrctrl_irq(device, KGSL_PWRFLAGS_OFF);
 	del_timer_sync(&device->idle_timer);
 
+	adreno_ocmem_gmem_free(adreno_dev);
+
 	/* Power down the device */
 	kgsl_pwrctrl_disable(device);
 
@@ -1181,7 +1709,7 @@
 				/*NOTE: with mmu enabled, gpuaddr doesn't mean
 				 * anything to mmap().
 				 */
-				shadowprop.gpuaddr = device->memstore.physaddr;
+				shadowprop.gpuaddr = device->memstore.gpuaddr;
 				shadowprop.size = device->memstore.size;
 				/* GSL needs this to be set, even if it
 				   appears to be meaningless */
@@ -1590,9 +2118,8 @@
 			cmds[1] = 0;
 
 			if (adreno_dev->drawctxt_active)
-				adreno_ringbuffer_issuecmds(device,
-					adreno_dev->drawctxt_active,
-					KGSL_CMD_FLAGS_NONE, &cmds[0], 2);
+				adreno_ringbuffer_issuecmds_intr(device,
+						context, &cmds[0], 2);
 			else
 				/* We would never call this function if there
 				 * was no active contexts running */
@@ -1746,6 +2273,13 @@
 	} while (time_elapsed < msecs);
 
 hang_dump:
+	/*
+	 * Check if the timestamp has retired here, because we may have hit
+	 * recovery, which can take some time and cause waiting threads to
+	 * time out
+	 */
+	if (kgsl_check_timestamp(device, context, timestamp))
+		goto done;
 	status = -ETIMEDOUT;
 	KGSL_DRV_ERR(device,
 		     "Device hang detected while waiting for timestamp: "
@@ -1920,12 +2454,6 @@
 	.setproperty = adreno_setproperty,
 };
 
-static struct platform_device_id adreno_id_table[] = {
-	{ DEVICE_3D0_NAME, (kernel_ulong_t)&device_3d0.dev, },
-	{ },
-};
-MODULE_DEVICE_TABLE(platform, adreno_id_table);
-
 static struct platform_driver adreno_platform_driver = {
 	.probe = adreno_probe,
 	.remove = __devexit_p(adreno_remove),
@@ -1936,6 +2464,7 @@
 		.owner = THIS_MODULE,
 		.name = DEVICE_3D_NAME,
 		.pm = &kgsl_pm_ops,
+		.of_match_table = adreno_match_table,
 	}
 };
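
adreno_of_get_bus_scale() above treats the flat "qcom,grp3d-vectors" property as num_usecases * num_paths tuples of four big-endian cells each (source index, destination index, ab, ib). The stand-alone example below uses invented sample values purely to illustrate the (i * num_paths + j) * 4 indexing; it is not driver code.

#include <stdio.h>
#include <stdint.h>

#define NUM_USECASES	2
#define NUM_PATHS	1

/* Host-order copies of: src index, dst index, ab, ib (one tuple per path) */
static const uint32_t vectors[NUM_USECASES * NUM_PATHS * 4] = {
	0, 0,           0,           0,		/* usecase 0, path 0: idle    */
	0, 0, 2000000000u, 3000000000u,		/* usecase 1, path 0: nominal */
};

int main(void)
{
	int i, j;

	for (i = 0; i < NUM_USECASES; i++) {
		for (j = 0; j < NUM_PATHS; j++) {
			int idx = (i * NUM_PATHS + j) * 4;

			printf("usecase %d path %d: src=%u dst=%u ab=%u ib=%u\n",
			       i, j,
			       (unsigned)vectors[idx],
			       (unsigned)vectors[idx + 1],
			       (unsigned)vectors[idx + 2],
			       (unsigned)vectors[idx + 3]);
		}
	}
	return 0;
}
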
 
diff --git a/drivers/gpu/msm/adreno.h b/drivers/gpu/msm/adreno.h
index 57f4859..279e7ed 100644
--- a/drivers/gpu/msm/adreno.h
+++ b/drivers/gpu/msm/adreno.h
@@ -17,6 +17,7 @@
 #include "adreno_drawctxt.h"
 #include "adreno_ringbuffer.h"
 #include "kgsl_iommu.h"
+#include <mach/ocmem.h>
 
 #define DEVICE_3D_NAME "kgsl-3d"
 #define DEVICE_3D0_NAME "kgsl-3d0"
@@ -32,7 +33,7 @@
 /* Flags to control command packet settings */
 #define KGSL_CMD_FLAGS_NONE             0x00000000
 #define KGSL_CMD_FLAGS_PMODE		0x00000001
-#define KGSL_CMD_FLAGS_NO_TS_CMP	0x00000002
+#define KGSL_CMD_FLAGS_DUMMY_INTR_CMD	0x00000002
 
 /* Command identifiers */
 #define KGSL_CONTEXT_TO_MEM_IDENTIFIER	0x2EADBEEF
@@ -61,6 +62,7 @@
 	ADRENO_REV_A225 = 225,
 	ADRENO_REV_A305 = 305,
 	ADRENO_REV_A320 = 320,
+	ADRENO_REV_A330 = 330,
 };
 
 struct adreno_gpudev;
@@ -87,6 +89,7 @@
 	unsigned int instruction_size;
 	unsigned int ib_check_level;
 	unsigned int fast_hang_detect;
+	struct ocmem_buf *ocmem_hdl;
 };
 
 struct adreno_gpudev {
diff --git a/drivers/gpu/msm/adreno_a2xx.c b/drivers/gpu/msm/adreno_a2xx.c
index 5a76c86..5ba3778 100644
--- a/drivers/gpu/msm/adreno_a2xx.c
+++ b/drivers/gpu/msm/adreno_a2xx.c
@@ -1822,7 +1822,8 @@
 	if (state) {
 		adreno_regwrite(device, REG_RBBM_INT_CNTL, RBBM_INT_MASK);
 		adreno_regwrite(device, REG_CP_INT_CNTL, CP_INT_MASK);
-		adreno_regwrite(device, MH_INTERRUPT_MASK, KGSL_MMU_INT_MASK);
+		adreno_regwrite(device, MH_INTERRUPT_MASK,
+			kgsl_mmu_get_int_mask());
 	} else {
 		adreno_regwrite(device, REG_RBBM_INT_CNTL, 0);
 		adreno_regwrite(device, REG_CP_INT_CNTL, 0);
@@ -1982,7 +1983,13 @@
 			0x18000000);
 	}
 
-	adreno_regwrite(device, REG_RBBM_CNTL, 0x00004442);
+	if (adreno_is_a203(adreno_dev))
+		/* For A203, increase the number of clocks that RBBM
+		 * will wait before de-asserting the Register Clock
+		 * Active signal */
+		adreno_regwrite(device, REG_RBBM_CNTL, 0x0000FFFF);
+	else
+		adreno_regwrite(device, REG_RBBM_CNTL, 0x00004442);
 
 	adreno_regwrite(device, REG_SQ_VS_PROGRAM, 0x00000000);
 	adreno_regwrite(device, REG_SQ_PS_PROGRAM, 0x00000000);
diff --git a/drivers/gpu/msm/adreno_a3xx.c b/drivers/gpu/msm/adreno_a3xx.c
index bb89067..2dbfd8f 100644
--- a/drivers/gpu/msm/adreno_a3xx.c
+++ b/drivers/gpu/msm/adreno_a3xx.c
@@ -2701,24 +2701,46 @@
 	struct kgsl_device *device = &adreno_dev->dev;
 
 	/* Set up 16 deep read/write request queues */
+	if (adreno_dev->gpurev == ADRENO_REV_A330) {
+		adreno_regwrite(device, A3XX_VBIF_IN_RD_LIM_CONF0, 0x18181818);
+		adreno_regwrite(device, A3XX_VBIF_IN_RD_LIM_CONF1, 0x00001818);
+		adreno_regwrite(device, A3XX_VBIF_OUT_RD_LIM_CONF0, 0x00001818);
+		adreno_regwrite(device, A3XX_VBIF_OUT_WR_LIM_CONF0, 0x00001818);
+		adreno_regwrite(device, A3XX_VBIF_DDR_OUT_MAX_BURST, 0x0000303);
+		adreno_regwrite(device, A3XX_VBIF_IN_WR_LIM_CONF0, 0x18181818);
+		adreno_regwrite(device, A3XX_VBIF_IN_WR_LIM_CONF1, 0x00001818);
+		/* Enable WR-REQ */
+		adreno_regwrite(device, A3XX_VBIF_GATE_OFF_WRREQ_EN, 0x0000FF);
 
-	adreno_regwrite(device, A3XX_VBIF_IN_RD_LIM_CONF0, 0x10101010);
-	adreno_regwrite(device, A3XX_VBIF_IN_RD_LIM_CONF1, 0x10101010);
-	adreno_regwrite(device, A3XX_VBIF_OUT_RD_LIM_CONF0, 0x10101010);
-	adreno_regwrite(device, A3XX_VBIF_OUT_WR_LIM_CONF0, 0x10101010);
-	adreno_regwrite(device, A3XX_VBIF_DDR_OUT_MAX_BURST, 0x00000303);
-	adreno_regwrite(device, A3XX_VBIF_IN_WR_LIM_CONF0, 0x10101010);
-	adreno_regwrite(device, A3XX_VBIF_IN_WR_LIM_CONF1, 0x10101010);
+		/* Set up round robin arbitration between both AXI ports */
+		adreno_regwrite(device, A3XX_VBIF_ARB_CTL, 0x00000030);
+		/* Set up VBIF_ROUND_ROBIN_QOS_ARB */
+		adreno_regwrite(device, A3XX_VBIF_ROUND_ROBIN_QOS_ARB, 0x0001);
 
-	/* Enable WR-REQ */
-	adreno_regwrite(device, A3XX_VBIF_GATE_OFF_WRREQ_EN, 0x000000FF);
+		/* Set up AOOO */
+		adreno_regwrite(device, A3XX_VBIF_OUT_AXI_AOOO_EN, 0x00000FFF);
+		adreno_regwrite(device, A3XX_VBIF_OUT_AXI_AOOO, 0x0FFF0FFF);
 
-	/* Set up round robin arbitration between both AXI ports */
-	adreno_regwrite(device, A3XX_VBIF_ARB_CTL, 0x00000030);
+		/* VBIF AXI AMEMTYPE CONFIG */
+		adreno_regwrite(device, A3XX_VBIF_OUT_AXI_AMEMTYPE_CONF0,
+			0x22222222);
+	} else {
+		adreno_regwrite(device, A3XX_VBIF_IN_RD_LIM_CONF0, 0x10101010);
+		adreno_regwrite(device, A3XX_VBIF_IN_RD_LIM_CONF1, 0x10101010);
+		adreno_regwrite(device, A3XX_VBIF_OUT_RD_LIM_CONF0, 0x10101010);
+		adreno_regwrite(device, A3XX_VBIF_OUT_WR_LIM_CONF0, 0x10101010);
+		adreno_regwrite(device, A3XX_VBIF_DDR_OUT_MAX_BURST, 0x0000303);
+		adreno_regwrite(device, A3XX_VBIF_IN_WR_LIM_CONF0, 0x10101010);
+		adreno_regwrite(device, A3XX_VBIF_IN_WR_LIM_CONF1, 0x10101010);
+		/* Enable WR-REQ */
+		adreno_regwrite(device, A3XX_VBIF_GATE_OFF_WRREQ_EN, 0x0000FF);
 
-	/* Set up AOOO */
-	adreno_regwrite(device, A3XX_VBIF_OUT_AXI_AOOO_EN, 0x0000003C);
-	adreno_regwrite(device, A3XX_VBIF_OUT_AXI_AOOO, 0x003C003C);
+		/* Set up round robin arbitration between both AXI ports */
+		adreno_regwrite(device, A3XX_VBIF_ARB_CTL, 0x00000030);
+		/* Set up AOOO */
+		adreno_regwrite(device, A3XX_VBIF_OUT_AXI_AOOO_EN, 0x0000003C);
+		adreno_regwrite(device, A3XX_VBIF_OUT_AXI_AOOO, 0x003C003C);
+	}
 
 	if (cpu_is_apq8064()) {
 		/* Enable 1K sort */
diff --git a/drivers/gpu/msm/adreno_drawctxt.c b/drivers/gpu/msm/adreno_drawctxt.c
index 098c4f5..6c74dfa 100644
--- a/drivers/gpu/msm/adreno_drawctxt.c
+++ b/drivers/gpu/msm/adreno_drawctxt.c
@@ -147,6 +147,7 @@
 {
 	struct adreno_context *drawctxt;
 	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
+	struct adreno_ringbuffer *rb = &adreno_dev->ringbuffer;
 	int ret;
 
 	drawctxt = kzalloc(sizeof(struct adreno_context), GFP_KERNEL);
@@ -157,6 +158,7 @@
 	drawctxt->pagetable = pagetable;
 	drawctxt->bin_base_offset = 0;
 	drawctxt->id = context->id;
+	rb->timestamp[context->id] = 0;
 
 	if (flags & KGSL_CONTEXT_PREAMBLE)
 		drawctxt->flags |= CTXT_FLAGS_PREAMBLE;
@@ -174,6 +176,12 @@
 	kgsl_sharedmem_writel(&device->memstore,
 			KGSL_MEMSTORE_OFFSET(drawctxt->id, ref_wait_ts),
 			KGSL_INIT_REFTIMESTAMP);
+	kgsl_sharedmem_writel(&device->memstore,
+			KGSL_MEMSTORE_OFFSET(drawctxt->id, ts_cmp_enable), 0);
+	kgsl_sharedmem_writel(&device->memstore,
+			KGSL_MEMSTORE_OFFSET(drawctxt->id, soptimestamp), 0);
+	kgsl_sharedmem_writel(&device->memstore,
+			KGSL_MEMSTORE_OFFSET(drawctxt->id, eoptimestamp), 0);
 
 	context->devctxt = drawctxt;
 	return 0;
diff --git a/drivers/gpu/msm/adreno_ringbuffer.c b/drivers/gpu/msm/adreno_ringbuffer.c
index 86a349a..49786ba 100644
--- a/drivers/gpu/msm/adreno_ringbuffer.c
+++ b/drivers/gpu/msm/adreno_ringbuffer.c
@@ -404,11 +404,8 @@
 
 void adreno_ringbuffer_stop(struct adreno_ringbuffer *rb)
 {
-	if (rb->flags & KGSL_FLAGS_STARTED) {
-		/* ME_HALT */
-		adreno_regwrite(rb->device, REG_CP_ME_CNTL, 0x10000000);
+	if (rb->flags & KGSL_FLAGS_STARTED)
 		rb->flags &= ~KGSL_FLAGS_STARTED;
-	}
 }
 
 int adreno_ringbuffer_init(struct kgsl_device *device)
@@ -494,9 +491,9 @@
 	*  error checking if needed
 	*/
 	total_sizedwords += flags & KGSL_CMD_FLAGS_PMODE ? 4 : 0;
-	total_sizedwords += !(flags & KGSL_CMD_FLAGS_NO_TS_CMP) ? 7 : 0;
 	/* 2 dwords to store the start of command sequence */
 	total_sizedwords += 2;
+	total_sizedwords += context ? 7 : 0;
 
 	if (adreno_is_a3xx(adreno_dev))
 		total_sizedwords += 7;
@@ -548,9 +545,10 @@
 
 	/* always increment the global timestamp. once. */
 	rb->timestamp[KGSL_MEMSTORE_GLOBAL]++;
-	if (context) {
+
+	if (context && !(flags & KGSL_CMD_FLAGS_DUMMY_INTR_CMD)) {
 		if (context_id == KGSL_MEMSTORE_GLOBAL)
-			rb->timestamp[context_id] =
+			rb->timestamp[context->id] =
 				rb->timestamp[KGSL_MEMSTORE_GLOBAL];
 		else
 			rb->timestamp[context_id]++;
@@ -580,7 +578,7 @@
 		GSL_RB_WRITE(ringcmds, rcmd_gpu,
 			cp_type3_packet(CP_MEM_WRITE, 2));
 		GSL_RB_WRITE(ringcmds, rcmd_gpu, (gpuaddr +
-			KGSL_MEMSTORE_OFFSET(context->id, soptimestamp)));
+			KGSL_MEMSTORE_OFFSET(context_id, soptimestamp)));
 		GSL_RB_WRITE(ringcmds, rcmd_gpu, timestamp);
 
 		/* end-of-pipeline timestamp */
@@ -588,14 +586,14 @@
 			cp_type3_packet(CP_EVENT_WRITE, 3));
 		GSL_RB_WRITE(ringcmds, rcmd_gpu, CACHE_FLUSH_TS);
 		GSL_RB_WRITE(ringcmds, rcmd_gpu, (gpuaddr +
-			KGSL_MEMSTORE_OFFSET(context->id, eoptimestamp)));
+			KGSL_MEMSTORE_OFFSET(context_id, eoptimestamp)));
 		GSL_RB_WRITE(ringcmds, rcmd_gpu, timestamp);
 
 		GSL_RB_WRITE(ringcmds, rcmd_gpu,
 			cp_type3_packet(CP_MEM_WRITE, 2));
 		GSL_RB_WRITE(ringcmds, rcmd_gpu, (gpuaddr +
-			      KGSL_MEMSTORE_OFFSET(KGSL_MEMSTORE_GLOBAL,
-				      eoptimestamp)));
+			KGSL_MEMSTORE_OFFSET(KGSL_MEMSTORE_GLOBAL,
+				eoptimestamp)));
 		GSL_RB_WRITE(ringcmds, rcmd_gpu,
 			rb->timestamp[KGSL_MEMSTORE_GLOBAL]);
 	} else {
@@ -603,13 +601,11 @@
 			cp_type3_packet(CP_EVENT_WRITE, 3));
 		GSL_RB_WRITE(ringcmds, rcmd_gpu, CACHE_FLUSH_TS);
 		GSL_RB_WRITE(ringcmds, rcmd_gpu, (gpuaddr +
-			      KGSL_MEMSTORE_OFFSET(KGSL_MEMSTORE_GLOBAL,
-				      eoptimestamp)));
-		GSL_RB_WRITE(ringcmds, rcmd_gpu,
-			rb->timestamp[KGSL_MEMSTORE_GLOBAL]);
+			KGSL_MEMSTORE_OFFSET(context_id, eoptimestamp)));
+		GSL_RB_WRITE(ringcmds, rcmd_gpu, rb->timestamp[context_id]);
 	}
 
-	if (!(flags & KGSL_CMD_FLAGS_NO_TS_CMP)) {
+	if (context) {
 		/* Conditional execution based on memory values */
 		GSL_RB_WRITE(ringcmds, rcmd_gpu,
 			cp_type3_packet(CP_COND_EXEC, 4));
@@ -641,6 +637,30 @@
 	return timestamp;
 }
 
+void
+adreno_ringbuffer_issuecmds_intr(struct kgsl_device *device,
+						struct kgsl_context *k_ctxt,
+						unsigned int *cmds,
+						int sizedwords)
+{
+	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
+	struct adreno_ringbuffer *rb = &adreno_dev->ringbuffer;
+	struct adreno_context *a_ctxt = NULL;
+
+	if (!k_ctxt)
+		return;
+
+	a_ctxt = k_ctxt->devctxt;
+
+	if (k_ctxt->id == KGSL_CONTEXT_INVALID ||
+		a_ctxt == NULL ||
+		device->state & KGSL_STATE_HUNG)
+		return;
+
+	adreno_ringbuffer_addcmds(rb, a_ctxt, KGSL_CMD_FLAGS_DUMMY_INTR_CMD,
+			cmds, sizedwords);
+}
+
 unsigned int
 adreno_ringbuffer_issuecmds(struct kgsl_device *device,
 						struct adreno_context *drawctxt,
diff --git a/drivers/gpu/msm/adreno_ringbuffer.h b/drivers/gpu/msm/adreno_ringbuffer.h
index 4cc57c2..6c3d9b1 100644
--- a/drivers/gpu/msm/adreno_ringbuffer.h
+++ b/drivers/gpu/msm/adreno_ringbuffer.h
@@ -110,6 +110,11 @@
 					unsigned int *cmdaddr,
 					int sizedwords);
 
+void adreno_ringbuffer_issuecmds_intr(struct kgsl_device *device,
+					struct kgsl_context *k_ctxt,
+					unsigned int *cmdaddr,
+					int sizedwords);
+
 void adreno_ringbuffer_submit(struct adreno_ringbuffer *rb);
 
 void kgsl_cp_intrcallback(struct kgsl_device *device);
diff --git a/drivers/gpu/msm/kgsl.c b/drivers/gpu/msm/kgsl.c
index 278be99..62e1521 100644
--- a/drivers/gpu/msm/kgsl.c
+++ b/drivers/gpu/msm/kgsl.c
@@ -450,6 +450,7 @@
 	    device->state == KGSL_STATE_ACTIVE &&
 		device->requested_state == KGSL_STATE_NONE) {
 		kgsl_pwrctrl_request_state(device, KGSL_STATE_NAP);
+		kgsl_pwrscale_idle(device, 1);
 		if (kgsl_pwrctrl_sleep(device) != 0)
 			mod_timer(&device->idle_timer,
 				  jiffies +
@@ -2273,7 +2274,8 @@
 
 	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
 
-	result = remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
+	result = remap_pfn_range(vma, vma->vm_start,
+				device->memstore.physaddr >> PAGE_SHIFT,
 				 vma_size, vma->vm_page_prot);
 	if (result != 0)
 		KGSL_MEM_ERR(device, "remap_pfn_range failed: %d\n",
@@ -2327,7 +2329,7 @@
 
 	/* Handle legacy behavior for memstore */
 
-	if (vma_offset == device->memstore.physaddr)
+	if (vma_offset == device->memstore.gpuaddr)
 		return kgsl_mmap_memstore(device, vma);
 
 	/* Find a chunk of GPU memory */
diff --git a/drivers/gpu/msm/kgsl_gpummu.c b/drivers/gpu/msm/kgsl_gpummu.c
index edccff1..d8472f2 100644
--- a/drivers/gpu/msm/kgsl_gpummu.c
+++ b/drivers/gpu/msm/kgsl_gpummu.c
@@ -686,7 +686,6 @@
 
 static void kgsl_gpummu_stop(struct kgsl_mmu *mmu)
 {
-	kgsl_regwrite(mmu->device, MH_MMU_CONFIG, 0x00000000);
 	mmu->flags &= ~KGSL_FLAGS_STARTED;
 }
 
diff --git a/drivers/gpu/msm/kgsl_iommu.c b/drivers/gpu/msm/kgsl_iommu.c
index 016771b..e858651 100644
--- a/drivers/gpu/msm/kgsl_iommu.c
+++ b/drivers/gpu/msm/kgsl_iommu.c
@@ -880,7 +880,6 @@
 	 */
 
 	if (mmu->flags & KGSL_FLAGS_STARTED) {
-		kgsl_regwrite(mmu->device, MH_MMU_CONFIG, 0x00000000);
 		/* detach iommu attachment */
 		kgsl_detach_pagetable_iommu_domain(mmu);
 		mmu->hwpagetable = NULL;
diff --git a/drivers/gpu/msm/kgsl_mmu.h b/drivers/gpu/msm/kgsl_mmu.h
index bc6ec8e..5293d66 100644
--- a/drivers/gpu/msm/kgsl_mmu.h
+++ b/drivers/gpu/msm/kgsl_mmu.h
@@ -299,4 +299,14 @@
 		(gpuaddr < (KGSL_PAGETABLE_BASE + kgsl_mmu_get_ptsize())));
 }
 
+static inline unsigned int kgsl_mmu_get_int_mask(void)
+{
+	/* Don't enable gpummu interrupts if IOMMU is enabled */
+	if (KGSL_MMU_TYPE_GPU == kgsl_mmu_get_mmutype())
+		return KGSL_MMU_INT_MASK;
+	else
+		return (MH_INTERRUPT_MASK__AXI_READ_ERROR |
+			MH_INTERRUPT_MASK__AXI_WRITE_ERROR);
+}
+
 #endif /* __KGSL_MMU_H */
diff --git a/drivers/gpu/msm/kgsl_pwrctrl.c b/drivers/gpu/msm/kgsl_pwrctrl.c
index fbf3bb4..6d4d4d3 100644
--- a/drivers/gpu/msm/kgsl_pwrctrl.c
+++ b/drivers/gpu/msm/kgsl_pwrctrl.c
@@ -439,8 +439,8 @@
 		if (test_and_clear_bit(KGSL_PWRFLAGS_POWER_ON,
 			&pwr->power_flags)) {
 			trace_kgsl_rail(device, state);
-			if (pwr->gpu_dig)
-				regulator_disable(pwr->gpu_dig);
+			if (pwr->gpu_cx)
+				regulator_disable(pwr->gpu_cx);
 			if (pwr->gpu_reg)
 				regulator_disable(pwr->gpu_reg);
 		}
@@ -456,8 +456,8 @@
 							"failed: %d\n",
 							status);
 			}
-			if (pwr->gpu_dig) {
-				int status = regulator_enable(pwr->gpu_dig);
+			if (pwr->gpu_cx) {
+				int status = regulator_enable(pwr->gpu_cx);
 				if (status)
 					KGSL_DRV_ERR(device,
 							"cx regulator_enable "
@@ -547,11 +547,11 @@
 		pwr->gpu_reg = NULL;
 
 	if (pwr->gpu_reg) {
-		pwr->gpu_dig = regulator_get(&pdev->dev, "vdd_dig");
-		if (IS_ERR(pwr->gpu_dig))
-			pwr->gpu_dig = NULL;
+		pwr->gpu_cx = regulator_get(&pdev->dev, "vddcx");
+		if (IS_ERR(pwr->gpu_cx))
+			pwr->gpu_cx = NULL;
 	} else
-		pwr->gpu_dig = NULL;
+		pwr->gpu_cx = NULL;
 
 	pwr->power_flags = 0;
 
@@ -615,9 +615,9 @@
 		pwr->gpu_reg = NULL;
 	}
 
-	if (pwr->gpu_dig) {
-		regulator_put(pwr->gpu_dig);
-		pwr->gpu_dig = NULL;
+	if (pwr->gpu_cx) {
+		regulator_put(pwr->gpu_cx);
+		pwr->gpu_cx = NULL;
 	}
 
 	for (i = 1; i < KGSL_MAX_CLKS; i++)
@@ -640,7 +640,7 @@
 
 	mutex_lock(&device->mutex);
 	if (device->state & (KGSL_STATE_ACTIVE | KGSL_STATE_NAP)) {
-		kgsl_pwrscale_idle(device);
+		kgsl_pwrscale_idle(device, 0);
 
 		if (kgsl_pwrctrl_sleep(device) != 0) {
 			mod_timer(&device->idle_timer,
diff --git a/drivers/gpu/msm/kgsl_pwrctrl.h b/drivers/gpu/msm/kgsl_pwrctrl.h
index 954c818..cd44152 100644
--- a/drivers/gpu/msm/kgsl_pwrctrl.h
+++ b/drivers/gpu/msm/kgsl_pwrctrl.h
@@ -50,7 +50,7 @@
 	unsigned int interval_timeout;
 	bool strtstp_sleepwake;
 	struct regulator *gpu_reg;
-	struct regulator *gpu_dig;
+	struct regulator *gpu_cx;
 	uint32_t pcl;
 	unsigned int nap_allowed;
 	unsigned int idle_needed;
diff --git a/drivers/gpu/msm/kgsl_pwrscale.c b/drivers/gpu/msm/kgsl_pwrscale.c
index 6fb9326..f6277b3 100644
--- a/drivers/gpu/msm/kgsl_pwrscale.c
+++ b/drivers/gpu/msm/kgsl_pwrscale.c
@@ -237,21 +237,18 @@
 void kgsl_pwrscale_busy(struct kgsl_device *device)
 {
 	if (PWRSCALE_ACTIVE(device) && device->pwrscale.policy->busy)
-		if ((!device->pwrscale.gpu_busy) &&
-			(device->requested_state != KGSL_STATE_SLUMBER))
+		if (device->requested_state != KGSL_STATE_SLUMBER)
 			device->pwrscale.policy->busy(device,
 					&device->pwrscale);
-	device->pwrscale.gpu_busy = 1;
 }
 
-void kgsl_pwrscale_idle(struct kgsl_device *device)
+void kgsl_pwrscale_idle(struct kgsl_device *device, unsigned int ignore_idle)
 {
 	if (PWRSCALE_ACTIVE(device) && device->pwrscale.policy->idle)
 		if (device->requested_state != KGSL_STATE_SLUMBER &&
 			device->requested_state != KGSL_STATE_SLEEP)
 			device->pwrscale.policy->idle(device,
-					&device->pwrscale);
-	device->pwrscale.gpu_busy = 0;
+					&device->pwrscale, ignore_idle);
 }
 EXPORT_SYMBOL(kgsl_pwrscale_idle);
 
diff --git a/drivers/gpu/msm/kgsl_pwrscale.h b/drivers/gpu/msm/kgsl_pwrscale.h
index 34698cd..ba9b1af 100644
--- a/drivers/gpu/msm/kgsl_pwrscale.h
+++ b/drivers/gpu/msm/kgsl_pwrscale.h
@@ -23,7 +23,8 @@
 	void (*close)(struct kgsl_device *device,
 		struct kgsl_pwrscale *pwrscale);
 	void (*idle)(struct kgsl_device *device,
-		struct kgsl_pwrscale *pwrscale);
+		struct kgsl_pwrscale *pwrscale,
+		unsigned int ignore_idle);
 	void (*busy)(struct kgsl_device *device,
 		struct kgsl_pwrscale *pwrscale);
 	void (*sleep)(struct kgsl_device *device,
@@ -36,7 +37,6 @@
 	struct kgsl_pwrscale_policy *policy;
 	struct kobject kobj;
 	void *priv;
-	int gpu_busy;
 	int enabled;
 };
 
@@ -64,7 +64,8 @@
 	struct kgsl_pwrscale_policy *policy);
 void kgsl_pwrscale_detach_policy(struct kgsl_device *device);
 
-void kgsl_pwrscale_idle(struct kgsl_device *device);
+void kgsl_pwrscale_idle(struct kgsl_device *device,
+				unsigned int ignore_idle);
 void kgsl_pwrscale_busy(struct kgsl_device *device);
 void kgsl_pwrscale_sleep(struct kgsl_device *device);
 void kgsl_pwrscale_wake(struct kgsl_device *device);
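
With this header change every pwrscale policy's ->idle() callback gains an ignore_idle argument (the trustzone policy later in this patch simply returns early when it is set). The fragment below is a hypothetical minimal policy adopting the new signature; the names and bodies are placeholders, and only the callback prototypes come from the header as changed above.

static void example_idle(struct kgsl_device *device,
			 struct kgsl_pwrscale *pwrscale,
			 unsigned int ignore_idle)
{
	if (ignore_idle)
		return;		/* caller only wants to nap; skip scaling */
	/* ... normal idle-time frequency decisions ... */
}

static void example_busy(struct kgsl_device *device,
			 struct kgsl_pwrscale *pwrscale)
{
	/* ... mark the GPU busy ... */
}

static struct kgsl_pwrscale_policy example_policy = {
	.name = "example",	/* field assumed from existing policies */
	.idle = example_idle,
	.busy = example_busy,
};
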
diff --git a/drivers/gpu/msm/kgsl_pwrscale_idlestats.c b/drivers/gpu/msm/kgsl_pwrscale_idlestats.c
index 4102302..fc58dd1 100644
--- a/drivers/gpu/msm/kgsl_pwrscale_idlestats.c
+++ b/drivers/gpu/msm/kgsl_pwrscale_idlestats.c
@@ -131,7 +131,7 @@
 }
 
 static void idlestats_idle(struct kgsl_device *device,
-			struct kgsl_pwrscale *pwrscale)
+		struct kgsl_pwrscale *pwrscale, unsigned int ignore_idle)
 {
 	int i, nr_cpu;
 	struct idlestats_priv *priv = pwrscale->priv;
diff --git a/drivers/gpu/msm/kgsl_pwrscale_msm.c b/drivers/gpu/msm/kgsl_pwrscale_msm.c
index 61d4b2d..c6f8b1b 100644
--- a/drivers/gpu/msm/kgsl_pwrscale_msm.c
+++ b/drivers/gpu/msm/kgsl_pwrscale_msm.c
@@ -17,6 +17,7 @@
 #include "kgsl_pwrscale.h"
 #include "kgsl_device.h"
 #include "a2xx_reg.h"
+#include "kgsl_trace.h"
 
 struct msm_priv {
 	struct kgsl_device *device;
@@ -26,6 +27,7 @@
 	struct msm_dcvs_idle idle_source;
 	struct msm_dcvs_freq freq_sink;
 	struct msm_dcvs_core_info *core_info;
+	int gpu_busy;
 };
 
 static int msm_idle_enable(struct msm_dcvs_idle *self,
@@ -89,29 +91,40 @@
 			struct kgsl_pwrscale *pwrscale)
 {
 	struct msm_priv *priv = pwrscale->priv;
-	if (priv->enabled)
+	if (priv->enabled && !priv->gpu_busy) {
 		msm_dcvs_idle(priv->handle, MSM_DCVS_IDLE_EXIT, 0);
+		trace_kgsl_mpdcvs(device, 1);
+		priv->gpu_busy = 1;
+	}
 	return;
 }
 
 static void msm_idle(struct kgsl_device *device,
-			struct kgsl_pwrscale *pwrscale)
+		struct kgsl_pwrscale *pwrscale, unsigned int ignore_idle)
 {
 	struct msm_priv *priv = pwrscale->priv;
-	unsigned int rb_rptr, rb_wptr;
-	kgsl_regread(device, REG_CP_RB_RPTR, &rb_rptr);
-	kgsl_regread(device, REG_CP_RB_WPTR, &rb_wptr);
 
-	if (priv->enabled && (rb_rptr == rb_wptr))
-		msm_dcvs_idle(priv->handle, MSM_DCVS_IDLE_ENTER, 0);
-
+	if (priv->enabled && priv->gpu_busy)
+		if (device->ftbl->isidle(device)) {
+			msm_dcvs_idle(priv->handle, MSM_DCVS_IDLE_ENTER, 0);
+			trace_kgsl_mpdcvs(device, 0);
+			priv->gpu_busy = 0;
+		}
 	return;
 }
 
 static void msm_sleep(struct kgsl_device *device,
 			struct kgsl_pwrscale *pwrscale)
 {
-	/* do we need to reset any parameters here? */
+	struct msm_priv *priv = pwrscale->priv;
+
+	if (priv->enabled && priv->gpu_busy) {
+		msm_dcvs_idle(priv->handle, MSM_DCVS_IDLE_ENTER, 0);
+		trace_kgsl_mpdcvs(device, 0);
+		priv->gpu_busy = 0;
+	}
+
+	return;
 }
 
 static int msm_init(struct kgsl_device *device,
@@ -159,10 +172,10 @@
 	ret = msm_dcvs_freq_sink_register(&priv->freq_sink);
 	if (ret >= 0) {
 		if (device->ftbl->isidle(device)) {
-			device->pwrscale.gpu_busy = 0;
+			priv->gpu_busy = 0;
 			msm_dcvs_idle(priv->handle, MSM_DCVS_IDLE_ENTER, 0);
 		} else {
-			device->pwrscale.gpu_busy = 1;
+			priv->gpu_busy = 1;
 		}
 		return 0;
 	}
diff --git a/drivers/gpu/msm/kgsl_pwrscale_trustzone.c b/drivers/gpu/msm/kgsl_pwrscale_trustzone.c
index d6c5e66..1b029b1 100644
--- a/drivers/gpu/msm/kgsl_pwrscale_trustzone.c
+++ b/drivers/gpu/msm/kgsl_pwrscale_trustzone.c
@@ -119,16 +119,19 @@
 					device->pwrctrl.default_pwrlevel);
 }
 
-static void tz_idle(struct kgsl_device *device, struct kgsl_pwrscale *pwrscale)
+static void tz_idle(struct kgsl_device *device, struct kgsl_pwrscale *pwrscale,
+						unsigned int ignore_idle)
 {
 	struct kgsl_pwrctrl *pwr = &device->pwrctrl;
 	struct tz_priv *priv = pwrscale->priv;
 	struct kgsl_power_stats stats;
 	int val, idle;
 
+	if (ignore_idle)
+		return;
+
 	/* In "performance" mode the clock speed always stays
 	   the same */
-
 	if (priv->governor == TZ_GOVERNOR_PERFORMANCE)
 		return;
 
diff --git a/drivers/gpu/msm/kgsl_trace.h b/drivers/gpu/msm/kgsl_trace.h
index 3eff40f..81ab3fb 100644
--- a/drivers/gpu/msm/kgsl_trace.h
+++ b/drivers/gpu/msm/kgsl_trace.h
@@ -251,6 +251,29 @@
 	)
 );
 
+TRACE_EVENT(kgsl_mpdcvs,
+
+	TP_PROTO(struct kgsl_device *device, unsigned int state),
+
+	TP_ARGS(device, state),
+
+	TP_STRUCT__entry(
+		__string(device_name, device->name)
+		__field(unsigned int, state)
+	),
+
+	TP_fast_assign(
+		__assign_str(device_name, device->name);
+		__entry->state = state;
+	),
+
+	TP_printk(
+		"d_name=%s %s",
+		__get_str(device_name),
+		__entry->state ? "BUSY" : "IDLE"
+	)
+);
+
 DECLARE_EVENT_CLASS(kgsl_pwrstate_template,
 	TP_PROTO(struct kgsl_device *device, unsigned int state),
 
diff --git a/drivers/gpu/msm/z180.c b/drivers/gpu/msm/z180.c
index 6efba45..3504dfc 100644
--- a/drivers/gpu/msm/z180.c
+++ b/drivers/gpu/msm/z180.c
@@ -892,7 +892,8 @@
 
 	if (state) {
 		z180_regwrite(device, (ADDR_VGC_IRQENABLE >> 2), 3);
-		z180_regwrite(device, MH_INTERRUPT_MASK, KGSL_MMU_INT_MASK);
+		z180_regwrite(device, MH_INTERRUPT_MASK,
+			kgsl_mmu_get_int_mask());
 	} else {
 		z180_regwrite(device, (ADDR_VGC_IRQENABLE >> 2), 0);
 		z180_regwrite(device, MH_INTERRUPT_MASK, 0);
diff --git a/drivers/hwmon/pm8xxx-adc.c b/drivers/hwmon/pm8xxx-adc.c
index aa9acf7..8e35252 100644
--- a/drivers/hwmon/pm8xxx-adc.c
+++ b/drivers/hwmon/pm8xxx-adc.c
@@ -23,7 +23,6 @@
 #include <linux/hwmon.h>
 #include <linux/module.h>
 #include <linux/debugfs.h>
-#include <linux/wakelock.h>
 #include <linux/interrupt.h>
 #include <linux/completion.h>
 #include <linux/hwmon-sysfs.h>
@@ -123,6 +122,7 @@
 #define PM8XXX_ADC_PA_THERM_VREG_UA_LOAD		100000
 #define PM8XXX_ADC_HWMON_NAME_LENGTH			32
 #define PM8XXX_ADC_BTM_INTERVAL_MAX			0x14
+#define PM8XXX_ADC_COMPLETION_TIMEOUT			(2 * HZ)
 
 struct pm8xxx_adc {
 	struct device				*dev;
@@ -141,7 +141,6 @@
 	struct work_struct			cool_work;
 	uint32_t				mpp_base;
 	struct device				*hwmon;
-	struct wake_lock			adc_wakelock;
 	int					msm_suspend_check;
 	struct pm8xxx_adc_amux_properties	*conv;
 	struct pm8xxx_adc_arb_btm_param		batt;
@@ -223,7 +222,6 @@
 			pr_err("PM8xxx ADC request made after suspend_noirq "
 					"with channel: %d\n", channel);
 		data_arb_cntrl |= PM8XXX_ADC_ARB_USRP_CNTRL1_EN_ARB;
-		wake_lock(&adc_pmic->adc_wakelock);
 	}
 
 	/* Write twice to the CNTRL register for the arbiter settings
@@ -242,8 +240,7 @@
 		INIT_COMPLETION(adc_pmic->adc_rslt_completion);
 		rc = pm8xxx_writeb(adc_pmic->dev->parent,
 			PM8XXX_ADC_ARB_USRP_CNTRL1, data_arb_cntrl);
-	} else
-		wake_unlock(&adc_pmic->adc_wakelock);
+	}
 
 	return 0;
 }
@@ -734,7 +731,23 @@
 		goto fail;
 	}
 
-	wait_for_completion(&adc_pmic->adc_rslt_completion);
+	rc = wait_for_completion_timeout(&adc_pmic->adc_rslt_completion,
+						PM8XXX_ADC_COMPLETION_TIMEOUT);
+	if (!rc) {
+		u8 data_arb_usrp_cntrl1 = 0;
+		rc = pm8xxx_adc_read_reg(PM8XXX_ADC_ARB_USRP_CNTRL1,
+					&data_arb_usrp_cntrl1);
+		if (rc < 0)
+			goto fail;
+		if (data_arb_usrp_cntrl1 == (PM8XXX_ADC_ARB_USRP_CNTRL1_EOC |
+					PM8XXX_ADC_ARB_USRP_CNTRL1_EN_ARB))
+			pr_debug("End of conversion status set\n");
+		else {
+			pr_err("EOC interrupt not received\n");
+			rc = -EINVAL;
+			goto fail;
+		}
+	}
 
 	rc = pm8xxx_adc_read_adc_code(&result->adc_code);
 	if (rc) {
@@ -1134,7 +1147,6 @@
 	struct pm8xxx_adc *adc_pmic = pmic_adc;
 	int i;
 
-	wake_lock_destroy(&adc_pmic->adc_wakelock);
 	platform_set_drvdata(pdev, NULL);
 	pmic_adc = NULL;
 	if (!pa_therm) {
@@ -1236,8 +1248,6 @@
 
 	disable_irq_nosync(adc_pmic->btm_cool_irq);
 	platform_set_drvdata(pdev, adc_pmic);
-	wake_lock_init(&adc_pmic->adc_wakelock, WAKE_LOCK_SUSPEND,
-					"pm8xxx_adc_wakelock");
 	adc_pmic->msm_suspend_check = 0;
 	pmic_adc = adc_pmic;
 
diff --git a/drivers/input/touchscreen/tsc2007.c b/drivers/input/touchscreen/tsc2007.c
index e59ca17..55639e0 100644
--- a/drivers/input/touchscreen/tsc2007.c
+++ b/drivers/input/touchscreen/tsc2007.c
@@ -415,6 +415,7 @@
 
 	input_dev->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_ABS);
 	input_dev->keybit[BIT_WORD(BTN_TOUCH)] = BIT_MASK(BTN_TOUCH);
+	__set_bit(INPUT_PROP_DIRECT, input_dev->propbit);
 
 	input_set_abs_params(input_dev, ABS_X, ts->min_x,
 				ts->max_x, pdata->fuzzx, 0);
diff --git a/drivers/media/dvb/dvb-core/dvb_demux.c b/drivers/media/dvb/dvb-core/dvb_demux.c
index 790cc10..df71f76 100644
--- a/drivers/media/dvb/dvb-core/dvb_demux.c
+++ b/drivers/media/dvb/dvb-core/dvb_demux.c
@@ -685,6 +685,52 @@
 	}
 }
 
+void dvb_dmx_swfilter_section_packets(struct dvb_demux *demux, const u8 *buf,
+			      size_t count)
+{
+	struct dvb_demux_feed *feed;
+	u16 pid = ts_pid(buf);
+	struct timespec pre_time;
+
+	if (dvb_demux_performancecheck)
+		pre_time = current_kernel_time();
+
+	spin_lock(&demux->lock);
+
+	demux->sw_filter_abort = 0;
+
+	while (count--) {
+		if (buf[0] != 0x47) {
+			buf += 188;
+			continue;
+		}
+
+		if (demux->playback_mode == DMX_PB_MODE_PULL)
+			if (dvb_dmx_swfilter_buffer_check(demux, pid) < 0)
+				break;
+
+		list_for_each_entry(feed, &demux->feed_list, list_head) {
+			if (feed->pid != pid)
+				continue;
+
+			if (!feed->feed.sec.is_filtering)
+				continue;
+
+			if (dvb_dmx_swfilter_section_packet(feed, buf) < 0) {
+				feed->feed.sec.seclen = 0;
+				feed->feed.sec.secbufp = 0;
+			}
+		}
+		buf += 188;
+	}
+
+	spin_unlock(&demux->lock);
+
+	if (dvb_demux_performancecheck)
+		demux->total_process_time += dvb_dmx_calc_time_delta(pre_time);
+}
+EXPORT_SYMBOL(dvb_dmx_swfilter_section_packets);
+
 void dvb_dmx_swfilter_packets(struct dvb_demux *demux, const u8 *buf,
 			      size_t count)
 {
diff --git a/drivers/media/dvb/dvb-core/dvb_demux.h b/drivers/media/dvb/dvb-core/dvb_demux.h
index a663191..5a32363 100644
--- a/drivers/media/dvb/dvb-core/dvb_demux.h
+++ b/drivers/media/dvb/dvb-core/dvb_demux.h
@@ -179,6 +179,8 @@
 
 int dvb_dmx_init(struct dvb_demux *dvbdemux);
 void dvb_dmx_release(struct dvb_demux *dvbdemux);
+void dvb_dmx_swfilter_section_packets(struct dvb_demux *demux, const u8 *buf,
+			      size_t count);
 void dvb_dmx_swfilter_packets(struct dvb_demux *dvbdmx, const u8 *buf,
 			      size_t count);
 void dvb_dmx_swfilter(struct dvb_demux *demux, const u8 *buf, size_t count);
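
Note that count in the new dvb_dmx_swfilter_section_packets() is a number of 188-byte TS packets, not a byte count: the loop above advances buf by 188 per packet. The wrapper below is a hypothetical caller in a demux front-end; only the exported function and its packet-count semantics come from this patch.

static void example_push_ts(struct dvb_demux *demux,
			    const u8 *buf, size_t len)
{
	/* count is in packets; the filter skips packets with a bad sync byte */
	dvb_dmx_swfilter_section_packets(demux, buf, len / 188);
}
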
diff --git a/drivers/media/radio/radio-tavarua.c b/drivers/media/radio/radio-tavarua.c
index 116b7f9..af4c2c9 100644
--- a/drivers/media/radio/radio-tavarua.c
+++ b/drivers/media/radio/radio-tavarua.c
@@ -122,6 +122,8 @@
 	int enable_optimized_srch_alg;
 	unsigned char spur_table_size;
 	struct fm_spur_data spur_data;
+	atomic_t validate_channel;
+	unsigned char is_station_valid;
 };
 
 /**************************************************************************
@@ -152,6 +154,7 @@
 static int update_spur_table(struct tavarua_device *radio);
 static int xfr_rdwr_data(struct tavarua_device *radio, int op, int size,
 	unsigned long offset, unsigned char *buf);
+static int compute_MPX_DCC(struct tavarua_device *radio, int *val);
 
 /* work function */
 static void read_int_stat(struct work_struct *work);
@@ -734,7 +737,7 @@
 static void tavarua_handle_interrupts(struct tavarua_device *radio)
 {
 	int i;
-	int retval;
+	int retval, adj_channel_tune_req = 0;
 	unsigned char xfr_status;
 	if (!radio->handle_irq) {
 		FMDBG("IRQ happened, but I won't handle it\n");
@@ -758,7 +761,25 @@
 			complete(&radio->sync_req_done);
 			radio->tune_req = 0;
 		}
-		tavarua_q_event(radio, TAVARUA_EVT_TUNE_SUCC);
+
+		/*
+		 * Do not queue the TUNE event while validating if the station
+		 * is good or not. As part of channel validation we tune to the
+		 * adjacent station, measure its MPX_DCC value, then tune back
+		 * to the original station and measure its MPX_DCC value.
+		 * Compare the MPX_DCC values of the current and adjacent stations
+		 * and decide if the channel is valid or not. During this period
+		 * we should not queue the TUNE event to the upper layers.
+		 */
+		adj_channel_tune_req = atomic_read(&radio->validate_channel);
+		if (adj_channel_tune_req) {
+			complete(&radio->sync_req_done);
+			FMDBG("Tune event for adjacent channel\n");
+		} else {
+			tavarua_q_event(radio, TAVARUA_EVT_TUNE_SUCC);
+			FMDBG("Queueing Tune event\n");
+		}
+
 		if (radio->srch_params.get_list) {
 			tavarua_start_xfr(radio, TAVARUA_XFR_SRCH_LIST,
 							RX_STATIONS_0);
@@ -2603,40 +2624,57 @@
 static int xfr_rdwr_data(struct tavarua_device *radio, int op, int size,
 	unsigned long offset, unsigned char *buf) {
 
-	unsigned char xfr_buf[XFR_REG_NUM];
+	unsigned char xfr_buf[XFR_REG_NUM + 1];
 	int retval = 0, temp = 0;
 
+	/* zero initialize the buffer */
 	memset(xfr_buf, 0x0, XFR_REG_NUM);
+
+	/* save the 'size' parameter */
 	temp = size;
 
-	xfr_buf[XFR_MODE_OFFSET]     = (size << 1);
+	/* Populate the XFR bytes */
+	xfr_buf[XFR_MODE_OFFSET]     = LSH_DATA(size, 1);
 	xfr_buf[XFR_ADDR_MSB_OFFSET] = GET_FREQ(offset, 1);
 	xfr_buf[XFR_ADDR_LSB_OFFSET] = GET_FREQ(offset, 0);
-
 	if (op == XFR_READ) {
+		if (size > XFR_REG_NUM) {
+			FMDERR("%s: Can't read more than 16 bytes\n", __func__);
+			return -EINVAL;
+		}
 		xfr_buf[XFR_MODE_OFFSET] |= (XFR_PEEK_MODE);
 		size = 3;
 	} else if (op == XFR_WRITE) {
+		if (size > (XFR_REG_NUM - 2)) {
+			FMDERR("%s: Can't write more than 14 bytes\n", __func__);
+			return -EINVAL;
+		}
 		xfr_buf[XFR_MODE_OFFSET] |= (XFR_POKE_MODE);
 		memcpy(&xfr_buf[XFR_DATA_OFFSET], buf, size);
 		size += 3;
 	}
 
+	/* Perform the XFR READ/WRITE operation */
+	init_completion(&radio->sync_req_done);
 	retval = tavarua_write_registers(radio, XFRCTRL, xfr_buf, size);
 	if (retval < 0) {
-		FMDERR("%s: Failed to performXFR operation\n", __func__);
+		FMDERR("%s: Failed to perform XFR operation\n", __func__);
 		return retval;
 	}
 
-	size = temp;
-
 	/*Wait for the XFR interrupt */
-	init_completion(&radio->sync_req_done);
 	if (!wait_for_completion_timeout(&radio->sync_req_done,
 		msecs_to_jiffies(WAIT_TIMEOUT))) {
 		FMDERR("Timeout: No XFR interrupt");
+		return -ETIMEDOUT;
 	}
 
+	/*
+	 * For XFR READ operation save the XFR data provided by the SOC.
+	 * Firmware reads the data from the address specified and places
+	 * them in to the registers XFRDAT0-XFRDAT15 which the host can read.
+	 */
+	size = temp;
 	if (op == XFR_READ) {
 		retval = tavarua_read_registers(radio, XFRDAT0, size);
 		if (retval < 0) {
@@ -2645,6 +2683,10 @@
 		}
 		if (buf != NULL)
 			memcpy(buf, &radio->registers[XFRDAT0], size);
+		else {
+			FMDERR("%s: No buffer to copy XFR data\n", __func__);
+			return -EINVAL;
+		}
 	}
 
 	return retval;
@@ -2966,6 +3008,9 @@
 	case V4L2_CID_PRIVATE_IRIS_GET_SINR:
 		retval = 0;
 		break;
+	case V4L2_CID_PRIVATE_VALID_CHANNEL:
+		ctrl->value = radio->is_station_valid;
+		break;
 	default:
 		retval = -EINVAL;
 	}
@@ -3114,12 +3159,15 @@
 		struct v4l2_control *ctrl)
 {
 	struct tavarua_device *radio = video_get_drvdata(video_devdata(file));
-	int retval = 0;
-	int size = 0, cnt = 0;
+	int retval = 0, size = 0, cnt = 0;
 	unsigned char value;
 	unsigned char xfr_buf[XFR_REG_NUM];
 	unsigned char tx_data[XFR_REG_NUM];
 	unsigned char dis_buf[XFR_REG_NUM];
+	unsigned int freq = 0, mpx_dcc = 0;
+	unsigned long curr = 0, prev = 0;
+
+	memset(xfr_buf, 0x0, XFR_REG_NUM);
 
 	switch (ctrl->id) {
 	case V4L2_CID_AUDIO_VOLUME:
@@ -3538,6 +3586,104 @@
 		if (retval < 0)
 			FMDERR("Tone generator failed\n");
 		break;
+	case V4L2_CID_PRIVATE_VALID_CHANNEL:
+		/* Do not notify the host of tune event */
+		atomic_set(&radio->validate_channel, 1);
+
+		FMDBG("Going into low power mode\n");
+		retval = tavarua_disable_interrupts(radio);
+
+		/*
+		 * Tune to the 50KHz adjacent channel. If the requested station
+		 * falls in the JAPAN band at the lower band-limit, then the
+		 * adjacent channel considered is 50KHz above the requested
+		 * station, as the firmware does not allow tuning to frequencies
+		 * outside the range 76000KHz to 108000KHz.
+		 */
+		if (ctrl->value == REGION_JAPAN_STANDARD_BAND_LOW)
+			freq = (ctrl->value + ADJ_CHANNEL_KHZ);
+		else
+			freq = (ctrl->value - ADJ_CHANNEL_KHZ);
+		INIT_COMPLETION(radio->sync_req_done);
+		retval = tavarua_set_freq(radio, (freq * TUNE_MULT));
+		if (retval < 0) {
+			FMDERR("Failed to tune to adjacent station\n");
+			goto error;
+		}
+		if (!wait_for_completion_timeout(&radio->sync_req_done,
+			msecs_to_jiffies(wait_timeout))) {
+			FMDERR("Timeout: No Tune response\n");
+			retval = -ETIMEDOUT;
+			goto error;
+		}
+
+		/*
+		 * Wait for a minimum of 100ms for the firmware
+		 * to start collecting the MPX_DCC values
+		 */
+		msleep(TAVARUA_DELAY * 10);
+
+		/* Compute MPX_DCC of adjacent station */
+		retval = compute_MPX_DCC(radio, &mpx_dcc);
+		if (retval < 0) {
+			FMDERR("Failed to get MPX_DCC of adjacent station\n");
+			goto error;
+		}
+		/* Calculate the absolute value of MPX_DCC */
+		prev = abs(mpx_dcc);
+
+		/* Tune back to original station */
+		INIT_COMPLETION(radio->sync_req_done);
+		retval = tavarua_set_freq(radio, (ctrl->value * TUNE_MULT));
+		if (retval < 0) {
+			FMDERR("Failed to tune to requested station\n");
+			goto error;
+		}
+		if (!wait_for_completion_timeout(&radio->sync_req_done,
+			msecs_to_jiffies(wait_timeout))) {
+			FMDERR("Timeout: No Tune response\n");
+			retval = -ETIMEDOUT;
+			goto error;
+		}
+
+		/*
+		 * Wait for a minimum of 100ms for the firmware
+		 * to start collecting the MPX_DCC values
+		 */
+		msleep(TAVARUA_DELAY * 10);
+
+		/* Compute MPX_DCC of current station */
+		retval = compute_MPX_DCC(radio, &mpx_dcc);
+		if (retval < 0) {
+			FMDERR("Failed to get MPX_DCC of current station\n");
+			goto error;
+		}
+		/* Calculate the absolute value of MPX_DCC */
+		curr = abs(mpx_dcc);
+
+		FMDBG("Going into normal power mode\n");
+		tavarua_setup_interrupts(radio,
+			(radio->registers[RDCTRL] & 0x03));
+
+		FMDBG("Absolute MPX_DCC of current station  : %lu\n", curr);
+		FMDBG("Absolute MPX_DCC of adjacent station : %lu\n", prev);
+
+		/*
+		 * For valid stations, the absolute MPX_DCC value will be within
+		 * the range 0 <= MPX_DCC <= 12566 and the MPX_DCC value of the
+		 * adjacent station will be greater than 20,000.
+		 */
+		if ((curr <= MPX_DCC_LIMIT) &&
+			(prev > MPX_DCC_UPPER_LIMIT)) {
+			FMDBG("%d KHz is A VALID STATION!\n", ctrl->value);
+			radio->is_station_valid = VALID_CHANNEL;
+		} else {
+			FMDBG("%d KHz is NOT A VALID STATION!\n", ctrl->value);
+			radio->is_station_valid = INVALID_CHANNEL;
+		}
+error:
+		atomic_set(&radio->validate_channel, 0);
+		break;
 	default:
 		retval = -EINVAL;
 	}
@@ -3548,6 +3694,56 @@
 	return retval;
 }
 
+static int compute_MPX_DCC(struct tavarua_device *radio, int *val)
+{
+	int DCC = 0, retval = 0;
+	int MPX_DCC[3];
+	unsigned char value;
+	unsigned char xfr_buf[XFR_REG_NUM];
+
+	/* Freeze the MPX_DCC value from changing */
+	value = CTRL_ON;
+	retval = xfr_rdwr_data(radio, XFR_WRITE, 1, MPX_DCC_BYPASS_REG, &value);
+	if (retval < 0) {
+		FMDERR("%s: Failed to freeze MPX_DCC\n", __func__);
+		return retval;
+	}
+
+	/* Measure the MPX_DCC of current station. */
+	retval = xfr_rdwr_data(radio, XFR_READ, 3, MPX_DCC_DATA_REG, xfr_buf);
+	if (retval < 0) {
+		FMDERR("%s: Failed to read MPX_DCC\n", __func__);
+		return retval;
+	}
+	MPX_DCC[0] = xfr_buf[0];
+	MPX_DCC[1] = xfr_buf[1];
+	MPX_DCC[2] = xfr_buf[2];
+	/*
+	 * Form the final MPX_DCC parameter
+	 * MPX_DCC[0] will form the LSB part
+	 * MPX_DCC[1] will be the middle part and 4 bits of
+	 * MPX_DCC[2] will be the MSB part of the 20-bit signed MPX_DCC
+	 */
+	DCC = (LSH_DATA(MPX_DCC[2], 16) | LSH_DATA(MPX_DCC[1], 8) | MPX_DCC[0]);
+
+	/* If bit 19 is set, sign-extend to get a negative value */
+	if (DCC & 0x00080000)
+		DCC |= 0xFFF00000;
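+	/*
+	 * e.g. bytes {0xFF, 0xFF, 0x0F} give DCC = 0x0FFFFF; bit 19 is set,
+	 * so the OR above sign-extends it to 0xFFFFFFFF, i.e. DCC = -1.
+	 */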
+
+	*val = DCC;
+
+	/* Un-freeze the MPX_DCC value */
+	value = CTRL_OFF;
+	retval = xfr_rdwr_data(radio, XFR_WRITE, 1, MPX_DCC_BYPASS_REG, &value);
+	if (retval < 0) {
+		FMDERR("%s: Failed to un-freeze MPX_DCC\n", __func__);
+		return retval;
+	}
+
+	return retval;
+}
+
 /*=============================================================================
 FUNCTION:  tavarua_vidioc_g_tuner
 =============================================================================*/
diff --git a/drivers/media/video/msm/csi/include/csi2.0/msm_ispif_hwreg.h b/drivers/media/video/msm/csi/include/csi2.0/msm_ispif_hwreg.h
index ecc4fea..c678ea2 100644
--- a/drivers/media/video/msm/csi/include/csi2.0/msm_ispif_hwreg.h
+++ b/drivers/media/video/msm/csi/include/csi2.0/msm_ispif_hwreg.h
@@ -69,9 +69,11 @@
 #define RAW_INTF_1_OVERFLOW_IRQ      25
 #define RESET_DONE_IRQ               27
 
-#define ISPIF_IRQ_STATUS_MASK        0xA493000
-#define ISPIF_IRQ_1_STATUS_MASK      0xA493000
-#define ISPIF_IRQ_STATUS_RDI_SOF_MASK	0x492000
+#define ISPIF_IRQ_STATUS_MASK          0xA493249
+#define ISPIF_IRQ_1_STATUS_MASK        0xA493249
+#define ISPIF_IRQ_STATUS_RDI_SOF_MASK  0x492000
+#define ISPIF_IRQ_STATUS_PIX_SOF_MASK  0x249
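+/* Combined SOF mask: RDI SOF bits | PIX SOF bits (0x492000 | 0x249) */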
+#define ISPIF_IRQ_STATUS_SOF_MASK      0x492249
 #define ISPIF_IRQ_GLOBAL_CLEAR_CMD     0x1
 
 #endif
diff --git a/drivers/media/video/msm/csi/include/csi3.0/msm_ispif_hwreg.h b/drivers/media/video/msm/csi/include/csi3.0/msm_ispif_hwreg.h
index 820adf4..4b17538 100644
--- a/drivers/media/video/msm/csi/include/csi3.0/msm_ispif_hwreg.h
+++ b/drivers/media/video/msm/csi/include/csi3.0/msm_ispif_hwreg.h
@@ -83,9 +83,12 @@
 #define RAW_INTF_1_OVERFLOW_IRQ      25
 #define RESET_DONE_IRQ               27
 
-#define ISPIF_IRQ_STATUS_MASK        0xA493000
-#define ISPIF_IRQ_1_STATUS_MASK      0xA493000
-#define ISPIF_IRQ_STATUS_RDI_SOF_MASK	0x492000
+#define ISPIF_IRQ_STATUS_MASK          0xA493249
+#define ISPIF_IRQ_1_STATUS_MASK        0xA493249
+#define ISPIF_IRQ_STATUS_RDI_SOF_MASK  0x492000
+#define ISPIF_IRQ_STATUS_PIX_SOF_MASK  0x249
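+/* Combined SOF mask: RDI SOF bits | PIX SOF bits (0x492000 | 0x249) */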
+#define ISPIF_IRQ_STATUS_SOF_MASK      0x492249
 #define ISPIF_IRQ_GLOBAL_CLEAR_CMD     0x1
 
 #endif
diff --git a/drivers/media/video/msm/csi/msm_ispif.c b/drivers/media/video/msm/csi/msm_ispif.c
index b23efb5..092ee90 100644
--- a/drivers/media/video/msm/csi/msm_ispif.c
+++ b/drivers/media/video/msm/csi/msm_ispif.c
@@ -24,6 +24,7 @@
 #define V4L2_IDENT_ISPIF                     50001
 #define CSID_VERSION_V2                      0x02000011
 #define CSID_VERSION_V3                      0x30000000
+
 #define MAX_CID 15
 
 static atomic_t ispif_irq_cnt;
@@ -52,6 +53,7 @@
 			else
 				data1 |= (0x1 << PIX_0_VFE_RST_STB) |
 					(0x1 << PIX_0_CSID_RST_STB);
+			ispif->pix_sof_count = 0;
 			break;
 
 		case RDI0:
@@ -127,7 +129,7 @@
 			(0x1 << PIX_1_CSID_RST_STB) |
 			(0x1 << RDI_2_VFE_RST_STB) |
 			(0x1 << RDI_2_CSID_RST_STB);
-	return 0;
+	ispif->pix_sof_count = 0;
 	msm_camera_io_w(data, ispif->base + ISPIF_RST_CMD_ADDR);
 	rc = wait_for_completion_interruptible(&ispif->reset_complete);
 	if (ispif->csid_version >= CSID_VERSION_V3) {
@@ -568,7 +570,8 @@
 
 DECLARE_TASKLET(ispif_tasklet, ispif_do_tasklet, 0);
 
-static void ispif_process_irq(struct ispif_irq_status *out)
+static void ispif_process_irq(struct ispif_device *ispif,
+	struct ispif_irq_status *out)
 {
 	unsigned long flags;
 	struct ispif_isr_queue_cmd *qcmd;
@@ -583,6 +586,13 @@
 	qcmd->ispifInterruptStatus0 = out->ispifIrqStatus0;
 	qcmd->ispifInterruptStatus1 = out->ispifIrqStatus1;
 
+	if (qcmd->ispifInterruptStatus0 & ISPIF_IRQ_STATUS_PIX_SOF_MASK) {
+		CDBG("%s: ispif PIX sof irq\n", __func__);
+		ispif->pix_sof_count++;
+		v4l2_subdev_notify(&ispif->subdev, NOTIFY_VFE_SOF_COUNT,
+			(void *)&ispif->pix_sof_count);
+	}
+
 	spin_lock_irqsave(&ispif_tasklet_lock, flags);
 	list_add_tail(&qcmd->list, &ispif_tasklet_q);
 
@@ -605,7 +615,7 @@
 	msm_camera_io_w(out->ispifIrqStatus1,
 		ispif->base + ISPIF_IRQ_CLEAR_1_ADDR);
 
-	CDBG("ispif->irq: Irq_status0 = 0x%x\n",
+	CDBG("%s: ispif irq: Irq_status0 = 0x%x\n", __func__,
 		out->ispifIrqStatus0);
 	if (out->ispifIrqStatus0 & ISPIF_IRQ_STATUS_MASK) {
 		if (out->ispifIrqStatus0 & (0x1 << RESET_DONE_IRQ))
@@ -614,10 +624,10 @@
 			pr_err("%s: pix intf 0 overflow.\n", __func__);
 		if (out->ispifIrqStatus0 & (0x1 << RAW_INTF_0_OVERFLOW_IRQ))
 			pr_err("%s: rdi intf 0 overflow.\n", __func__);
-		if ((out->ispifIrqStatus0 & ISPIF_IRQ_STATUS_RDI_SOF_MASK) ||
+		if ((out->ispifIrqStatus0 & ISPIF_IRQ_STATUS_SOF_MASK) ||
 			(out->ispifIrqStatus1 &
-				ISPIF_IRQ_STATUS_RDI_SOF_MASK)) {
-			ispif_process_irq(out);
+				ISPIF_IRQ_STATUS_SOF_MASK)) {
+			ispif_process_irq(ispif, out);
 		}
 	}
 	msm_camera_io_w(ISPIF_IRQ_GLOBAL_CLEAR_CMD, ispif->base +
@@ -648,7 +658,6 @@
 	INIT_LIST_HEAD(&ispif_tasklet_q);
 	rc = request_irq(ispif->irq->start, msm_io_ispif_irq,
 		IRQF_TRIGGER_RISING, "ispif", ispif);
-
 	init_completion(&ispif->reset_complete);
 
 	ispif->csid_version = *csid_version;
diff --git a/drivers/media/video/msm/csi/msm_ispif.h b/drivers/media/video/msm/csi/msm_ispif.h
index df93a44..f4ad661 100644
--- a/drivers/media/video/msm/csi/msm_ispif.h
+++ b/drivers/media/video/msm/csi/msm_ispif.h
@@ -34,6 +34,7 @@
 	struct completion reset_complete;
 	uint32_t csid_version;
 	struct clk *ispif_clk[5];
+	uint32_t pix_sof_count;
 };
 
 struct ispif_isr_queue_cmd {
diff --git a/drivers/media/video/msm/io/Makefile b/drivers/media/video/msm/io/Makefile
index 611eecd..fdff226 100644
--- a/drivers/media/video/msm/io/Makefile
+++ b/drivers/media/video/msm/io/Makefile
@@ -1,6 +1,6 @@
 GCC_VERSION      := $(shell $(CONFIG_SHELL) $(PWD)/scripts/gcc-version.sh $(CROSS_COMPILE)gcc)
 
-EXTRA_CFLAGS += -Idrivers/media/video/msm
+ccflags-y += -Idrivers/media/video/msm -Idrivers/media/video/msm/cci
 obj-$(CONFIG_MSM_CAMERA)   += msm_camera_io_util.o msm_camera_i2c.o
 ifeq ($(CONFIG_MSM_CAMERA_V4L2),y)
   obj-$(CONFIG_MSM_CAMERA) += msm_camera_i2c_mux.o
diff --git a/drivers/media/video/msm/io/msm_camera_i2c.c b/drivers/media/video/msm/io/msm_camera_i2c.c
index e946569..82bca02 100644
--- a/drivers/media/video/msm/io/msm_camera_i2c.c
+++ b/drivers/media/video/msm/io/msm_camera_i2c.c
@@ -10,7 +10,10 @@
  * GNU General Public License for more details.
  */
 
+#include <mach/camera.h>
 #include "msm_camera_i2c.h"
+#include "msm.h"
+#include "msm_cci.h"
 
 int32_t msm_camera_i2c_rxdata(struct msm_camera_i2c_client *dev_client,
 	unsigned char *rxdata, int data_length)
@@ -63,6 +66,7 @@
 	int32_t rc = -EFAULT;
 	unsigned char buf[client->addr_type+data_type];
 	uint8_t len = 0;
+
 	if ((client->addr_type != MSM_CAMERA_I2C_BYTE_ADDR
 		&& client->addr_type != MSM_CAMERA_I2C_WORD_ADDR)
 		|| (data_type != MSM_CAMERA_I2C_BYTE_DATA
@@ -71,33 +75,51 @@
 
 	S_I2C_DBG("%s reg addr = 0x%x data type: %d\n",
 			  __func__, addr, data_type);
-	if (client->addr_type == MSM_CAMERA_I2C_BYTE_ADDR) {
-		buf[0] = addr;
-		S_I2C_DBG("%s byte %d: 0x%x\n", __func__, len, buf[len]);
-		len = 1;
-	} else if (client->addr_type == MSM_CAMERA_I2C_WORD_ADDR) {
-		buf[0] = addr >> BITS_PER_BYTE;
-		buf[1] = addr;
-		S_I2C_DBG("%s byte %d: 0x%x\n", __func__, len, buf[len]);
-		S_I2C_DBG("%s byte %d: 0x%x\n", __func__, len+1, buf[len+1]);
-		len = 2;
+	if (client->cci_client) {
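+		/* Route the write through the CCI subdev, not raw I2C */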
+		struct msm_camera_cci_ctrl cci_ctrl;
+		struct msm_camera_i2c_reg_conf reg_conf_tbl;
+		reg_conf_tbl.reg_addr = addr;
+		reg_conf_tbl.reg_data = data;
+		cci_ctrl.cmd = MSM_CCI_I2C_WRITE;
+		cci_ctrl.cci_info = client->cci_client;
+		cci_ctrl.cfg.cci_i2c_write_cfg.reg_conf_tbl = &reg_conf_tbl;
+		cci_ctrl.cfg.cci_i2c_write_cfg.data_type = data_type;
+		cci_ctrl.cfg.cci_i2c_write_cfg.size = 1;
+		rc = v4l2_subdev_call(client->cci_client->cci_subdev,
+				core, ioctl, VIDIOC_MSM_CCI_CFG, &cci_ctrl);
+		CDBG("%s line %d rc = %d\n", __func__, __LINE__, rc);
+		rc = cci_ctrl.status;
+	} else {
+		if (client->addr_type == MSM_CAMERA_I2C_BYTE_ADDR) {
+			buf[0] = addr;
+			S_I2C_DBG("%s byte %d: 0x%x\n", __func__,
+				len, buf[len]);
+			len = 1;
+		} else if (client->addr_type == MSM_CAMERA_I2C_WORD_ADDR) {
+			buf[0] = addr >> BITS_PER_BYTE;
+			buf[1] = addr;
+			S_I2C_DBG("%s byte %d: 0x%x\n", __func__,
+				len, buf[len]);
+			S_I2C_DBG("%s byte %d: 0x%x\n", __func__,
+				len+1, buf[len+1]);
+			len = 2;
+		}
+		S_I2C_DBG("Data: 0x%x\n", data);
+		if (data_type == MSM_CAMERA_I2C_BYTE_DATA) {
+			buf[len] = data;
+			S_I2C_DBG("Byte %d: 0x%x\n", len, buf[len]);
+			len += 1;
+		} else if (data_type == MSM_CAMERA_I2C_WORD_DATA) {
+			buf[len] = data >> BITS_PER_BYTE;
+			buf[len+1] = data;
+			S_I2C_DBG("Byte %d: 0x%x\n", len, buf[len]);
+			S_I2C_DBG("Byte %d: 0x%x\n", len+1, buf[len+1]);
+			len += 2;
+		}
+		rc = msm_camera_i2c_txdata(client, buf, len);
+		if (rc < 0)
+			S_I2C_DBG("%s fail\n", __func__);
 	}
-	S_I2C_DBG("Data: 0x%x\n", data);
-	if (data_type == MSM_CAMERA_I2C_BYTE_DATA) {
-		buf[len] = data;
-		S_I2C_DBG("Byte %d: 0x%x\n", len, buf[len]);
-		len += 1;
-	} else if (data_type == MSM_CAMERA_I2C_WORD_DATA) {
-		buf[len] = data >> BITS_PER_BYTE;
-		buf[len+1] = data;
-		S_I2C_DBG("Byte %d: 0x%x\n", len, buf[len]);
-		S_I2C_DBG("Byte %d: 0x%x\n", len+1, buf[len+1]);
-		len += 2;
-	}
-
-	rc = msm_camera_i2c_txdata(client, buf, len);
-	if (rc < 0)
-		S_I2C_DBG("%s fail\n", __func__);
 	return rc;
 }
 
@@ -115,26 +137,44 @@
 
 	S_I2C_DBG("%s reg addr = 0x%x num bytes: %d\n",
 			  __func__, addr, num_byte);
-	if (client->addr_type == MSM_CAMERA_I2C_BYTE_ADDR) {
-		buf[0] = addr;
-		S_I2C_DBG("%s byte %d: 0x%x\n", __func__, len, buf[len]);
-		len = 1;
-	} else if (client->addr_type == MSM_CAMERA_I2C_WORD_ADDR) {
-		buf[0] = addr >> BITS_PER_BYTE;
-		buf[1] = addr;
-		S_I2C_DBG("%s byte %d: 0x%x\n", __func__, len, buf[len]);
-		S_I2C_DBG("%s byte %d: 0x%x\n", __func__, len+1, buf[len+1]);
-		len = 2;
+	if (client->cci_client) {
+		struct msm_camera_cci_ctrl cci_ctrl;
+		struct msm_camera_i2c_reg_conf reg_conf_tbl[num_byte];
+		reg_conf_tbl[0].reg_addr = addr;
+		for (i = 0; i < num_byte; i++)
+			reg_conf_tbl[i].reg_data = data[i];
+		cci_ctrl.cmd = MSM_CCI_I2C_WRITE;
+		cci_ctrl.cci_info = client->cci_client;
+		cci_ctrl.cfg.cci_i2c_write_cfg.reg_conf_tbl = reg_conf_tbl;
+		cci_ctrl.cfg.cci_i2c_write_cfg.size = num_byte;
+		rc = v4l2_subdev_call(client->cci_client->cci_subdev,
+				core, ioctl, VIDIOC_MSM_CCI_CFG, &cci_ctrl);
+		CDBG("%s line %d rc = %d\n", __func__, __LINE__, rc);
+		rc = cci_ctrl.status;
+	} else {
+		if (client->addr_type == MSM_CAMERA_I2C_BYTE_ADDR) {
+			buf[0] = addr;
+			S_I2C_DBG("%s byte %d: 0x%x\n", __func__,
+				len, buf[len]);
+			len = 1;
+		} else if (client->addr_type == MSM_CAMERA_I2C_WORD_ADDR) {
+			buf[0] = addr >> BITS_PER_BYTE;
+			buf[1] = addr;
+			S_I2C_DBG("%s byte %d: 0x%x\n", __func__,
+				len, buf[len]);
+			S_I2C_DBG("%s byte %d: 0x%x\n", __func__,
+				len+1, buf[len+1]);
+			len = 2;
+		}
+		for (i = 0; i < num_byte; i++) {
+			buf[i+len] = data[i];
+			S_I2C_DBG("Byte %d: 0x%x\n", i+len, buf[i+len]);
+			S_I2C_DBG("Data: 0x%x\n", data[i]);
+		}
+		rc = msm_camera_i2c_txdata(client, buf, len+num_byte);
+		if (rc < 0)
+			S_I2C_DBG("%s fail\n", __func__);
 	}
-	for (i = 0; i < num_byte; i++) {
-		buf[i+len] = data[i];
-		S_I2C_DBG("Byte %d: 0x%x\n", i+len, buf[i+len]);
-		S_I2C_DBG("Data: 0x%x\n", data[i]);
-	}
-
-	rc = msm_camera_i2c_txdata(client, buf, len+num_byte);
-	if (rc < 0)
-		S_I2C_DBG("%s fail\n", __func__);
 	return rc;
 }
 
@@ -302,65 +342,80 @@
 {
 	int i;
 	int32_t rc = -EFAULT;
-	for (i = 0; i < size; i++) {
-		enum msm_camera_i2c_data_type dt;
-		if (reg_conf_tbl->cmd_type == MSM_CAMERA_I2C_CMD_POLL) {
-			rc = msm_camera_i2c_poll(client, reg_conf_tbl->reg_addr,
-				reg_conf_tbl->reg_data, reg_conf_tbl->dt);
-		} else {
-			if (reg_conf_tbl->dt == 0)
-				dt = data_type;
-			else
-				dt = reg_conf_tbl->dt;
-
-			switch (dt) {
-			case MSM_CAMERA_I2C_BYTE_DATA:
-			case MSM_CAMERA_I2C_WORD_DATA:
-				rc = msm_camera_i2c_write(
-					client,
-					reg_conf_tbl->reg_addr,
-					reg_conf_tbl->reg_data, dt);
-				break;
-			case MSM_CAMERA_I2C_SET_BYTE_MASK:
-				rc = msm_camera_i2c_set_mask(client,
+	if (client->cci_client) {
+		struct msm_camera_cci_ctrl cci_ctrl;
+		cci_ctrl.cmd = MSM_CCI_I2C_WRITE;
+		cci_ctrl.cci_info = client->cci_client;
+		cci_ctrl.cfg.cci_i2c_write_cfg.reg_conf_tbl = reg_conf_tbl;
+		cci_ctrl.cfg.cci_i2c_write_cfg.data_type = data_type;
+		cci_ctrl.cfg.cci_i2c_write_cfg.size = size;
+		rc = v4l2_subdev_call(client->cci_client->cci_subdev,
+				core, ioctl, VIDIOC_MSM_CCI_CFG, &cci_ctrl);
+		CDBG("%s line %d rc = %d\n", __func__, __LINE__, rc);
+		rc = cci_ctrl.status;
+	} else {
+		for (i = 0; i < size; i++) {
+			enum msm_camera_i2c_data_type dt;
+			if (reg_conf_tbl->cmd_type == MSM_CAMERA_I2C_CMD_POLL) {
+				rc = msm_camera_i2c_poll(client,
 					reg_conf_tbl->reg_addr,
 					reg_conf_tbl->reg_data,
-					MSM_CAMERA_I2C_BYTE_DATA, 1);
-				break;
-			case MSM_CAMERA_I2C_UNSET_BYTE_MASK:
-				rc = msm_camera_i2c_set_mask(client,
-					reg_conf_tbl->reg_addr,
-					reg_conf_tbl->reg_data,
-					MSM_CAMERA_I2C_BYTE_DATA, 0);
-				break;
-			case MSM_CAMERA_I2C_SET_WORD_MASK:
-				rc = msm_camera_i2c_set_mask(client,
-					reg_conf_tbl->reg_addr,
-					reg_conf_tbl->reg_data,
-					MSM_CAMERA_I2C_WORD_DATA, 1);
-				break;
-			case MSM_CAMERA_I2C_UNSET_WORD_MASK:
-				rc = msm_camera_i2c_set_mask(client,
-					reg_conf_tbl->reg_addr,
-					reg_conf_tbl->reg_data,
-					MSM_CAMERA_I2C_WORD_DATA, 0);
-				break;
-			case MSM_CAMERA_I2C_SET_BYTE_WRITE_MASK_DATA:
-				rc = msm_camera_i2c_set_write_mask_data(client,
-					reg_conf_tbl->reg_addr,
-					reg_conf_tbl->reg_data,
-					reg_conf_tbl->mask,
-					MSM_CAMERA_I2C_BYTE_DATA);
-				break;
-			default:
-				pr_err("%s: Unsupport data type: %d\n",
-					__func__, dt);
-				break;
+					reg_conf_tbl->dt);
+			} else {
+				if (reg_conf_tbl->dt == 0)
+					dt = data_type;
+				else
+					dt = reg_conf_tbl->dt;
+				switch (dt) {
+				case MSM_CAMERA_I2C_BYTE_DATA:
+				case MSM_CAMERA_I2C_WORD_DATA:
+					rc = msm_camera_i2c_write(
+						client,
+						reg_conf_tbl->reg_addr,
+						reg_conf_tbl->reg_data, dt);
+					break;
+				case MSM_CAMERA_I2C_SET_BYTE_MASK:
+					rc = msm_camera_i2c_set_mask(client,
+						reg_conf_tbl->reg_addr,
+						reg_conf_tbl->reg_data,
+						MSM_CAMERA_I2C_BYTE_DATA, 1);
+					break;
+				case MSM_CAMERA_I2C_UNSET_BYTE_MASK:
+					rc = msm_camera_i2c_set_mask(client,
+						reg_conf_tbl->reg_addr,
+						reg_conf_tbl->reg_data,
+						MSM_CAMERA_I2C_BYTE_DATA, 0);
+					break;
+				case MSM_CAMERA_I2C_SET_WORD_MASK:
+					rc = msm_camera_i2c_set_mask(client,
+						reg_conf_tbl->reg_addr,
+						reg_conf_tbl->reg_data,
+						MSM_CAMERA_I2C_WORD_DATA, 1);
+					break;
+				case MSM_CAMERA_I2C_UNSET_WORD_MASK:
+					rc = msm_camera_i2c_set_mask(client,
+						reg_conf_tbl->reg_addr,
+						reg_conf_tbl->reg_data,
+						MSM_CAMERA_I2C_WORD_DATA, 0);
+					break;
+				case MSM_CAMERA_I2C_SET_BYTE_WRITE_MASK_DATA:
+					rc = msm_camera_i2c_set_write_mask_data(
+						client,
+						reg_conf_tbl->reg_addr,
+						reg_conf_tbl->reg_data,
+						reg_conf_tbl->mask,
+						MSM_CAMERA_I2C_BYTE_DATA);
+					break;
+				default:
+					pr_err("%s: Unsupported data type: %d\n",
+						__func__, dt);
+					break;
+				}
 			}
+			if (rc < 0)
+				break;
+			reg_conf_tbl++;
 		}
-		if (rc < 0)
-			break;
-		reg_conf_tbl++;
 	}
 	return rc;
 }
@@ -378,16 +433,30 @@
 		&& data_type != MSM_CAMERA_I2C_WORD_DATA))
 		return rc;
 
-	if (client->addr_type == MSM_CAMERA_I2C_BYTE_ADDR) {
-		buf[0] = addr;
-	} else if (client->addr_type == MSM_CAMERA_I2C_WORD_ADDR) {
-		buf[0] = addr >> BITS_PER_BYTE;
-		buf[1] = addr;
-	}
-	rc = msm_camera_i2c_rxdata(client, buf, data_type);
-	if (rc < 0) {
-		S_I2C_DBG("%s fail\n", __func__);
-		return rc;
+	if (client->cci_client) {
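+		/* Route the read through the CCI subdev, not raw I2C */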
+		struct msm_camera_cci_ctrl cci_ctrl;
+		cci_ctrl.cmd = MSM_CCI_I2C_READ;
+		cci_ctrl.cci_info = client->cci_client;
+		cci_ctrl.cfg.cci_i2c_read_cfg.addr = addr;
+		cci_ctrl.cfg.cci_i2c_read_cfg.addr_type = client->addr_type;
+		cci_ctrl.cfg.cci_i2c_read_cfg.data = buf;
+		cci_ctrl.cfg.cci_i2c_read_cfg.num_byte = data_type;
+		rc = v4l2_subdev_call(client->cci_client->cci_subdev,
+				core, ioctl, VIDIOC_MSM_CCI_CFG, &cci_ctrl);
+		CDBG("%s line %d rc = %d\n", __func__, __LINE__, rc);
+		rc = cci_ctrl.status;
+	} else {
+		if (client->addr_type == MSM_CAMERA_I2C_BYTE_ADDR) {
+			buf[0] = addr;
+		} else if (client->addr_type == MSM_CAMERA_I2C_WORD_ADDR) {
+			buf[0] = addr >> BITS_PER_BYTE;
+			buf[1] = addr;
+		}
+		rc = msm_camera_i2c_rxdata(client, buf, data_type);
+		if (rc < 0) {
+			S_I2C_DBG("%s fail\n", __func__);
+			return rc;
+		}
 	}
 	if (data_type == MSM_CAMERA_I2C_BYTE_DATA)
 		*data = buf[0];
@@ -410,16 +479,30 @@
 		|| num_byte == 0)
 		return rc;
 
-	if (client->addr_type == MSM_CAMERA_I2C_BYTE_ADDR) {
-		buf[0] = addr;
-	} else if (client->addr_type == MSM_CAMERA_I2C_WORD_ADDR) {
-		buf[0] = addr >> BITS_PER_BYTE;
-		buf[1] = addr;
-	}
-	rc = msm_camera_i2c_rxdata(client, buf, num_byte);
-	if (rc < 0) {
-		S_I2C_DBG("%s fail\n", __func__);
-		return rc;
+	if (client->cci_client) {
+		struct msm_camera_cci_ctrl cci_ctrl;
+		cci_ctrl.cmd = MSM_CCI_I2C_READ;
+		cci_ctrl.cci_info = client->cci_client;
+		cci_ctrl.cfg.cci_i2c_read_cfg.addr = addr;
+		cci_ctrl.cfg.cci_i2c_read_cfg.addr_type = client->addr_type;
+		cci_ctrl.cfg.cci_i2c_read_cfg.data = buf;
+		cci_ctrl.cfg.cci_i2c_read_cfg.num_byte = num_byte;
+		rc = v4l2_subdev_call(client->cci_client->cci_subdev,
+				core, ioctl, VIDIOC_MSM_CCI_CFG, &cci_ctrl);
+		CDBG("%s line %d rc = %d\n", __func__, __LINE__, rc);
+		rc = cci_ctrl.status;
+	} else {
+		if (client->addr_type == MSM_CAMERA_I2C_BYTE_ADDR) {
+			buf[0] = addr;
+		} else if (client->addr_type == MSM_CAMERA_I2C_WORD_ADDR) {
+			buf[0] = addr >> BITS_PER_BYTE;
+			buf[1] = addr;
+		}
+		rc = msm_camera_i2c_rxdata(client, buf, num_byte);
+		if (rc < 0) {
+			S_I2C_DBG("%s fail\n", __func__);
+			return rc;
+		}
 	}
 
 	S_I2C_DBG("%s addr = 0x%x", __func__, addr);
@@ -486,3 +569,17 @@
 	return rc;
 }
 
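+/*
+ * Forward a CCI control command for this client to the CCI subdev and
+ * return the resulting command status.
+ */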
+int32_t msm_sensor_cci_util(struct msm_camera_i2c_client *client,
+	uint16_t cci_cmd)
+{
+	int32_t rc = 0;
+	struct msm_camera_cci_ctrl cci_ctrl;
+
+	CDBG("%s line %d\n", __func__, __LINE__);
+	cci_ctrl.cmd = cci_cmd;
+	cci_ctrl.cci_info = client->cci_client;
+	rc = v4l2_subdev_call(client->cci_client->cci_subdev,
+			core, ioctl, VIDIOC_MSM_CCI_CFG, &cci_ctrl);
+	CDBG("%s line %d rc = %d\n", __func__, __LINE__, rc);
+	return cci_ctrl.status;
+}
diff --git a/drivers/media/video/msm/io/msm_camera_i2c.h b/drivers/media/video/msm/io/msm_camera_i2c.h
index 99c2762..169a0b3 100644
--- a/drivers/media/video/msm/io/msm_camera_i2c.h
+++ b/drivers/media/video/msm/io/msm_camera_i2c.h
@@ -16,6 +16,7 @@
 #include <linux/i2c.h>
 #include <linux/delay.h>
 #include <mach/camera.h>
+#include <media/v4l2-subdev.h>
 
 #define CONFIG_MSM_CAMERA_I2C_DBG 0
 
@@ -103,4 +104,7 @@
 
 int32_t msm_sensor_write_all_conf_array(struct msm_camera_i2c_client *client,
 	struct msm_camera_i2c_conf_array *array, uint16_t size);
+
+int32_t msm_sensor_cci_util(struct msm_camera_i2c_client *client,
+	uint16_t cci_cmd);
 #endif
diff --git a/drivers/media/video/msm/msm.c b/drivers/media/video/msm/msm.c
index 29aba08..e5c1091 100644
--- a/drivers/media/video/msm/msm.c
+++ b/drivers/media/video/msm/msm.c
@@ -189,12 +189,25 @@
 {
 	int rc = 0, i, j;
 	struct msm_cam_v4l2_dev_inst *pcam_inst;
+	struct msm_cam_media_controller *pmctl;
+	struct msm_cam_v4l2_device *pcam = video_drvdata(f);
 	pcam_inst = container_of(f->private_data,
 		struct msm_cam_v4l2_dev_inst, eventHandle);
 	D("%s\n", __func__);
 	WARN_ON(pctx != f->private_data);
 
 	mutex_lock(&pcam_inst->inst_lock);
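+	/* Defer vb2 queue init to the first REQBUFS with a non-zero count */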
+	if (!pcam_inst->vbqueue_initialized && pb->count) {
+		pmctl = msm_cam_server_get_mctl(pcam->mctl_handle);
+		if (pmctl == NULL) {
+			pr_err("%s Invalid mctl ptr", __func__);
+			mutex_unlock(&pcam_inst->inst_lock);
+			return -EINVAL;
+		}
+		pmctl->mctl_vbqueue_init(pcam_inst, &pcam_inst->vid_bufq,
+			pb->type);
+		pcam_inst->vbqueue_initialized = 1;
+	}
+
 	rc = vb2_reqbufs(&pcam_inst->vid_bufq, pb);
 	if (rc < 0) {
 		pr_err("%s reqbufs failed %d ", __func__, rc);
@@ -564,7 +577,6 @@
 	int rc;
 	/* get the video device */
 	struct msm_cam_v4l2_device *pcam  = video_drvdata(f);
-	struct msm_cam_media_controller *pmctl;
 	struct msm_cam_v4l2_dev_inst *pcam_inst;
 	pcam_inst = container_of(f->private_data,
 		struct msm_cam_v4l2_dev_inst, eventHandle);
@@ -575,16 +587,6 @@
 		(void *)pfmt->fmt.pix.priv);
 	WARN_ON(pctx != f->private_data);
 
-	pmctl = msm_cam_server_get_mctl(pcam->mctl_handle);
-	if (pmctl == NULL)
-		return -EINVAL;
-
-	if (!pcam_inst->vbqueue_initialized) {
-		pmctl->mctl_vbqueue_init(pcam_inst, &pcam_inst->vid_bufq,
-					V4L2_BUF_TYPE_VIDEO_CAPTURE);
-		pcam_inst->vbqueue_initialized = 1;
-	}
-
 	mutex_lock(&pcam->vid_lock);
 
 	rc = msm_server_set_fmt(pcam, pcam_inst->my_index, pfmt);
@@ -602,7 +604,6 @@
 {
 	int rc;
 	struct msm_cam_v4l2_device *pcam = video_drvdata(f);
-	struct msm_cam_media_controller *pmctl;
 	struct msm_cam_v4l2_dev_inst *pcam_inst;
 	pcam_inst = container_of(f->private_data,
 			struct msm_cam_v4l2_dev_inst, eventHandle);
@@ -610,16 +611,6 @@
 	D("%s Inst %p\n", __func__, pcam_inst);
 	WARN_ON(pctx != f->private_data);
 
-	pmctl = msm_cam_server_get_mctl(pcam->mctl_handle);
-	if (pmctl == NULL)
-		return -EINVAL;
-
-	if (!pcam_inst->vbqueue_initialized) {
-		pmctl->mctl_vbqueue_init(pcam_inst, &pcam_inst->vid_bufq,
-					V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE);
-		pcam_inst->vbqueue_initialized = 1;
-	}
-
 	mutex_lock(&pcam->vid_lock);
 	rc = msm_server_set_fmt_mplane(pcam, pcam_inst->my_index, pfmt);
 	mutex_unlock(&pcam->vid_lock);
@@ -718,6 +709,8 @@
 	SET_VIDEO_INST_IDX(pcam_inst->inst_handle, pcam_inst->my_index);
 	pcam_inst->pcam->dev_inst_map[pcam_inst->image_mode] = pcam_inst;
 	pcam_inst->path = msm_vidbuf_get_path(pcam_inst->image_mode);
+	rc = msm_cam_server_config_interface_map(pcam_inst->image_mode,
+			pcam_inst->pcam->mctl_handle);
 	D("%spath=%d,rc=%d\n", __func__,
 		pcam_inst->path, rc);
 	return rc;
@@ -834,7 +827,6 @@
 	int ion_client_created = 0;
 #endif
 	int server_q_idx = 0;
-	/*struct msm_isp_ops *p_isp = 0;*/
 	/* get the video device */
 	struct msm_cam_v4l2_device *pcam  = video_drvdata(f);
 	struct msm_cam_v4l2_dev_inst *pcam_inst;
@@ -941,8 +933,7 @@
 	msm_destroy_v4l2_event_queue(&pcam_inst->eventHandle);
 
 	if (pmctl->mctl_release)
-		if (pmctl->mctl_release(pmctl) < 0)
-			pr_err("%s: mctl_release failed\n", __func__);
+		pmctl->mctl_release(pmctl);
 mctl_open_failed:
 	if (pcam->use_count == 1) {
 #ifdef CONFIG_MSM_MULTIMEDIA_USE_ION
@@ -1066,11 +1057,8 @@
 	if (pcam_inst->streamon) {
 		/*something went wrong since instance
 		is closing without streamoff*/
-		if (pmctl->mctl_release) {
-			rc = pmctl->mctl_release(pmctl);
-			if (rc < 0)
-				pr_err("mctl_release fails %d\n", rc);
-		}
+		if (pmctl->mctl_release)
+			pmctl->mctl_release(pmctl);
 		pmctl->mctl_release = NULL;/*so that it isn't closed again*/
 	}
 
@@ -1100,11 +1088,8 @@
 				pr_err("msm_send_close_server failed %d\n", rc);
 		}
 
-		if (pmctl->mctl_release) {
-			rc = pmctl->mctl_release(pmctl);
-			if (rc < 0)
-				pr_err("mctl_release fails %d\n", rc);
-		}
+		if (pmctl->mctl_release)
+			pmctl->mctl_release(pmctl);
 
 #ifdef CONFIG_MSM_MULTIMEDIA_USE_ION
 		kref_put(&pmctl->refcount, msm_release_ion_client);
diff --git a/drivers/media/video/msm/msm.h b/drivers/media/video/msm/msm.h
index 687ed74..a2c21bd 100644
--- a/drivers/media/video/msm/msm.h
+++ b/drivers/media/video/msm/msm.h
@@ -148,11 +148,11 @@
 	NOTIFY_VFE_MSG_COMP_STATS, /* arg = struct msm_stats_buf */
 	NOTIFY_VFE_BUF_EVT, /* arg = struct msm_vfe_resp */
 	NOTIFY_VFE_CAMIF_ERROR,
+	NOTIFY_VFE_SOF_COUNT, /* arg = uint32_t * (pix SOF count) */
 	NOTIFY_PCLK_CHANGE, /* arg = pclk */
 	NOTIFY_CSIPHY_CFG, /* arg = msm_camera_csiphy_params */
 	NOTIFY_CSID_CFG, /* arg = msm_camera_csid_params */
 	NOTIFY_CSIC_CFG, /* arg = msm_camera_csic_params */
-	NOTIFY_VFE_BUF_FREE_EVT, /* arg = msm_camera_csic_params */
 	NOTIFY_VFE_IRQ,
 	NOTIFY_AXI_IRQ,
 	NOTIFY_GESTURE_EVT, /* arg = v4l2_event */
@@ -237,7 +237,7 @@
 	int (*mctl_cb)(void);
 	int (*mctl_cmd)(struct msm_cam_media_controller *p_mctl,
 					unsigned int cmd, unsigned long arg);
-	int (*mctl_release)(struct msm_cam_media_controller *p_mctl);
+	void (*mctl_release)(struct msm_cam_media_controller *p_mctl);
 	int (*mctl_buf_init)(struct msm_cam_v4l2_dev_inst *pcam);
 	int (*mctl_vbqueue_init)(struct msm_cam_v4l2_dev_inst *pcam,
 				struct vb2_queue *q, enum v4l2_buf_type type);
@@ -288,15 +288,10 @@
 struct msm_isp_ops {
 	char *config_dev_name;
 
-	/*int (*isp_init)(struct msm_cam_v4l2_device *pcam);*/
-	int (*isp_open)(struct v4l2_subdev *sd,
-		struct msm_cam_media_controller *mctl);
 	int (*isp_config)(struct msm_cam_media_controller *pmctl,
 		 unsigned int cmd, unsigned long arg);
 	int (*isp_notify)(struct v4l2_subdev *sd,
 		unsigned int notification, void *arg);
-	void (*isp_release)(struct msm_cam_media_controller *mctl,
-		struct v4l2_subdev *sd);
 	int (*isp_pp_cmd)(struct msm_cam_media_controller *pmctl,
 		 struct msm_mctl_pp_cmd, void *data);
 
@@ -398,9 +393,9 @@
 	struct cdev config_cdev;
 	struct v4l2_queue_util config_stat_event_queue;
 	int use_count;
-	/*struct msm_isp_ops* isp_subdev;*/
 	struct msm_cam_media_controller *p_mctl;
 	struct msm_mem_map_info mem_map;
+	int dev_num;
 };
 
 struct msm_cam_subdev_info {
@@ -490,6 +485,15 @@
 	struct intr_table_entry comp_intr_tbl[CAMERA_SS_IRQ_MAX];
 };
 
+struct interface_map {
+	/* The interface a particular stream belongs to.
+	 * PIX0, RDI0, RDI1, or RDI2
+	 */
+	int interface;
+	/* The handle of the mctl instance the interface runs on */
+	uint32_t mctl_handle;
+};
+
 /* abstract camera server device for all sensor successfully probed*/
 struct msm_cam_server_dev {
 
@@ -505,7 +509,7 @@
 	/* info of configs successfully created*/
 	struct msm_cam_config_dev_info config_info;
 	/* active working camera device - only one allowed at this time*/
-	struct msm_cam_v4l2_device *pcam_active;
+	struct msm_cam_v4l2_device *pcam_active[MAX_NUM_ACTIVE_CAMERA];
 	/* number of camera devices opened*/
 	atomic_t number_pcam_active;
 	struct v4l2_queue_util server_command_queue;
@@ -519,6 +523,8 @@
 	struct msm_cam_server_mctl_inst mctl[MAX_NUM_ACTIVE_CAMERA];
 	uint32_t mctl_handle_cnt;
 
+	struct interface_map interface_map_table[INTF_MAX];
+
 	int use_count;
 	/* all the registered ISP subdevice*/
 	struct msm_isp_ops *isp_subdev[MSM_MAX_CAMERA_CONFIGS];
diff --git a/drivers/media/video/msm/msm_isp.c b/drivers/media/video/msm/msm_isp.c
index 935ce75..3d94afd 100644
--- a/drivers/media/video/msm/msm_isp.c
+++ b/drivers/media/video/msm/msm_isp.c
@@ -67,16 +67,16 @@
 	}
 }
 
-static int msm_isp_notify_VFE_BUF_FREE_EVT(struct v4l2_subdev *sd, void *arg)
+static int msm_isp_notify_VFE_SOF_COUNT_EVT(struct v4l2_subdev *sd, void *arg)
 {
 	struct msm_vfe_cfg_cmd cfgcmd;
 	struct msm_camvfe_params vfe_params;
 	int rc;
 
-	cfgcmd.cmd_type = CMD_VFE_BUFFER_RELEASE;
+	cfgcmd.cmd_type = CMD_VFE_SOF_COUNT_UPDATE;
 	cfgcmd.value = NULL;
 	vfe_params.vfe_cfg = &cfgcmd;
-	vfe_params.data = NULL;
+	vfe_params.data = arg;
 	rc = v4l2_subdev_call(sd, core, ioctl, 0, &vfe_params);
 	return 0;
 }
@@ -294,8 +294,8 @@
 	if (notification == NOTIFY_VFE_BUF_EVT)
 		return msm_isp_notify_VFE_BUF_EVT(sd, arg);
 
-	if (notification == NOTIFY_VFE_BUF_FREE_EVT)
-		return msm_isp_notify_VFE_BUF_FREE_EVT(sd, arg);
+	if (notification == NOTIFY_VFE_SOF_COUNT)
+		return msm_isp_notify_VFE_SOF_COUNT_EVT(sd, arg);
 
 	isp_event = kzalloc(sizeof(struct msm_isp_event_ctrl), GFP_ATOMIC);
 	if (!isp_event) {
@@ -499,35 +499,6 @@
 	return msm_isp_notify_vfe(sd, notification, arg);
 }
 
-/* This function is called by open() function, so we need to init HW*/
-static int msm_isp_open(struct v4l2_subdev *sd,
-	struct msm_cam_media_controller *mctl)
-{
-	/* init vfe and senor, register sync callbacks for init*/
-	int rc = 0;
-	D("%s\n", __func__);
-	if (!mctl) {
-		pr_err("%s: param is NULL", __func__);
-		return -EINVAL;
-	}
-
-	rc = v4l2_subdev_call(sd, core, ioctl,
-				VIDIOC_MSM_VFE_INIT, NULL);
-	if (rc < 0) {
-		pr_err("%s: vfe_init failed at %d\n",
-			__func__, rc);
-	}
-	return rc;
-}
-
-static void msm_isp_release(struct msm_cam_media_controller *mctl,
-	struct v4l2_subdev *sd)
-{
-	D("%s\n", __func__);
-	v4l2_subdev_call(sd, core, ioctl,
-				VIDIOC_MSM_VFE_RELEASE, NULL);
-}
-
 static int msm_config_vfe(struct v4l2_subdev *sd,
 	struct msm_cam_media_controller *mctl, void __user *arg)
 {
@@ -780,9 +751,7 @@
 	int i = 0;
 
 	for (i = 0; i < g_num_config_nodes; i++) {
-		isp_subdev[i].isp_open = msm_isp_open;
 		isp_subdev[i].isp_config = msm_isp_config;
-		isp_subdev[i].isp_release  = msm_isp_release;
 		isp_subdev[i].isp_notify = msm_isp_notify;
 	}
 	return 0;
diff --git a/drivers/media/video/msm/msm_mctl.c b/drivers/media/video/msm/msm_mctl.c
index fd5591c..a8d74a7 100644
--- a/drivers/media/video/msm/msm_mctl.c
+++ b/drivers/media/video/msm/msm_mctl.c
@@ -130,6 +130,14 @@
 	.pxlcode	= V4L2_MBUS_FMT_SGRBG10_1X10, /* Bayer sensor */
 	.colorspace = V4L2_COLORSPACE_JPEG,
 	},
+	{
+	.name	   = "YUYV",
+	.depth	  = 16,
+	.bitsperpxl = 16,
+	.fourcc	 = V4L2_PIX_FMT_YUYV,
+	.pxlcode	= V4L2_MBUS_FMT_YUYV8_2X8, /* YUV sensor */
+	.colorspace = V4L2_COLORSPACE_JPEG,
+	},
 
 };
 
@@ -548,9 +556,8 @@
 	return rc;
 }
 
-static int msm_mctl_release(struct msm_cam_media_controller *p_mctl)
+static void msm_mctl_release(struct msm_cam_media_controller *p_mctl)
 {
-	int rc = 0;
 	struct msm_sensor_ctrl_t *s_ctrl = get_sctrl(p_mctl->sensor_sdev);
 	struct msm_camera_sensor_info *sinfo =
 		(struct msm_camera_sensor_info *) s_ctrl->sensordata;
@@ -573,11 +580,6 @@
 			VIDIOC_MSM_AXI_RELEASE, NULL);
 	}
 
-	if (p_mctl->isp_sdev && p_mctl->isp_sdev->isp_release
-		&& p_mctl->isp_sdev->sd)
-		p_mctl->isp_sdev->isp_release(p_mctl,
-			p_mctl->isp_sdev->sd);
-
 	if (p_mctl->csid_sdev) {
 		v4l2_subdev_call(p_mctl->csid_sdev, core, ioctl,
 			VIDIOC_MSM_CSID_RELEASE, NULL);
@@ -601,7 +603,6 @@
 	pm_qos_remove_request(&p_mctl->pm_qos_req_list);
 
 	wake_unlock(&p_mctl->wake_lock);
-	return rc;
 }
 
 int msm_mctl_init_user_formats(struct msm_cam_v4l2_device *pcam)
@@ -860,6 +861,7 @@
 
 	pmctl = msm_cam_server_get_mctl(pcam->mctl_handle);
 	mutex_lock(&pcam->mctl_node.dev_lock);
+	mutex_lock(&pcam_inst->inst_lock);
 	D("%s : active %d ", __func__, pcam->mctl_node.active);
 	if (pcam->mctl_node.active == 1) {
 		rc = msm_cam_server_close_mctl_session(pcam);
@@ -872,6 +874,7 @@
 		pmctl = NULL;
 	}
 	pcam_inst->streamon = 0;
+	pcam->mctl_node.use_count--;
 	pcam->mctl_node.dev_inst_map[pcam_inst->image_mode] = NULL;
 	if (pcam_inst->vbqueue_initialized)
 		vb2_queue_release(&pcam_inst->vid_bufq);
@@ -880,17 +883,16 @@
 	msm_destroy_v4l2_event_queue(&pcam_inst->eventHandle);
 	CLR_MCTLPP_INST_IDX(pcam_inst->inst_handle);
 	CLR_IMG_MODE(pcam_inst->inst_handle);
-
+	mutex_unlock(&pcam_inst->inst_lock);
 	mutex_destroy(&pcam_inst->inst_lock);
 
 	kfree(pcam_inst);
+	f->private_data = NULL;
 	if (NULL != pmctl) {
 		D("%s : release ion client", __func__);
 		kref_put(&pmctl->refcount, msm_release_ion_client);
 	}
-	f->private_data = NULL;
 	mutex_unlock(&pcam->mctl_node.dev_lock);
-	pcam->mctl_node.use_count--;
 	D("%s : use_count %d X ", __func__, pcam->mctl_node.use_count);
 	return rc;
 }
@@ -995,12 +997,24 @@
 {
 	int rc = 0, i, j;
 	struct msm_cam_v4l2_dev_inst *pcam_inst;
+	struct msm_cam_media_controller *pmctl;
+	struct msm_cam_v4l2_device *pcam = video_drvdata(f);
 	pcam_inst = container_of(f->private_data,
 		struct msm_cam_v4l2_dev_inst, eventHandle);
 	D("%s\n", __func__);
 	WARN_ON(pctx != f->private_data);
 
 	mutex_lock(&pcam_inst->inst_lock);
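+	/* Defer vb2 queue init to the first REQBUFS with a non-zero count */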
+	if (!pcam_inst->vbqueue_initialized && pb->count) {
+		pmctl = msm_cam_server_get_mctl(pcam->mctl_handle);
+		if (pmctl == NULL) {
+			pr_err("%s Invalid mctl ptr", __func__);
+			mutex_unlock(&pcam_inst->inst_lock);
+			return -EINVAL;
+		}
+		pmctl->mctl_vbqueue_init(pcam_inst, &pcam_inst->vid_bufq,
+			pb->type);
+		pcam_inst->vbqueue_initialized = 1;
+	}
 	rc = vb2_reqbufs(&pcam_inst->vid_bufq, pb);
 	if (rc < 0) {
 		pr_err("%s reqbufs failed %d ", __func__, rc);
@@ -1313,30 +1327,10 @@
 					struct v4l2_format *pfmt)
 {
 	int rc = 0;
-	/* get the video device */
-	struct msm_cam_v4l2_device *pcam  = video_drvdata(f);
-	struct msm_cam_media_controller *pmctl;
-	struct msm_cam_v4l2_dev_inst *pcam_inst;
-	pcam_inst = container_of(f->private_data,
-		struct msm_cam_v4l2_dev_inst, eventHandle);
 
 	D("%s\n", __func__);
-	D("%s, inst=0x%x,idx=%d,priv = 0x%p\n",
-		__func__, (u32)pcam_inst, pcam_inst->my_index,
-		(void *)pfmt->fmt.pix.priv);
 	WARN_ON(pctx != f->private_data);
 
-	pmctl = msm_cam_server_get_mctl(pcam->mctl_handle);
-	if (!pmctl) {
-		pr_err("%s mctl ptr is null ", __func__);
-		return -EINVAL;
-	}
-	if (!pcam_inst->vbqueue_initialized) {
-		pmctl->mctl_vbqueue_init(pcam_inst, &pcam_inst->vid_bufq,
-					V4L2_BUF_TYPE_VIDEO_CAPTURE);
-		pcam_inst->vbqueue_initialized = 1;
-	}
-
 	return rc;
 }
 
@@ -1345,25 +1339,13 @@
 {
 	int rc = 0, i;
 	struct msm_cam_v4l2_device *pcam = video_drvdata(f);
-	struct msm_cam_media_controller *pmctl;
 	struct msm_cam_v4l2_dev_inst *pcam_inst;
 	pcam_inst = container_of(f->private_data,
 			struct msm_cam_v4l2_dev_inst, eventHandle);
 
-	D("%s Inst %p vbqueue %d\n", __func__,
-		pcam_inst, pcam_inst->vbqueue_initialized);
+	D("%s Inst %p\n", __func__, pcam_inst);
 	WARN_ON(pctx != f->private_data);
 
-	pmctl = msm_cam_server_get_mctl(pcam->mctl_handle);
-	if (!pmctl) {
-		pr_err("%s mctl ptr is null ", __func__);
-		return -EINVAL;
-	}
-	if (!pcam_inst->vbqueue_initialized) {
-		pmctl->mctl_vbqueue_init(pcam_inst, &pcam_inst->vid_bufq,
-					V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE);
-		pcam_inst->vbqueue_initialized = 1;
-	}
 	for (i = 0; i < pcam->num_fmts; i++)
 		if (pcam->usr_fmts[i].fourcc == pfmt->fmt.pix_mp.pixelformat)
 			break;
@@ -1467,6 +1449,9 @@
 	pcam_inst->pcam->mctl_node.dev_inst_map[pcam_inst->image_mode] =
 		pcam_inst;
 	pcam_inst->path = msm_mctl_vidbuf_get_path(pcam_inst->image_mode);
+
+	rc = msm_cam_server_config_interface_map(pcam_inst->image_mode,
+			pcam_inst->pcam->mctl_handle);
 	D("%s path=%d, image mode = %d rc=%d\n", __func__,
 		pcam_inst->path, pcam_inst->image_mode, rc);
 	return rc;
diff --git a/drivers/media/video/msm/msm_mctl_buf.c b/drivers/media/video/msm/msm_mctl_buf.c
index 3cd6a25..9f7f689 100644
--- a/drivers/media/video/msm/msm_mctl_buf.c
+++ b/drivers/media/video/msm/msm_mctl_buf.c
@@ -212,11 +212,6 @@
 	pcam = pcam_inst->pcam;
 	buf = container_of(vb, struct msm_frame_buffer, vidbuf);
 
-	pmctl = msm_cam_server_get_mctl(pcam->mctl_handle);
-	if (pmctl == NULL) {
-		pr_err("%s No mctl found\n", __func__);
-		return;
-	}
 
 	if (pcam_inst->vid_fmt.type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
 		for (i = 0; i < vb->num_planes; i++) {
@@ -262,6 +257,12 @@
 		}
 		spin_unlock_irqrestore(&pcam_inst->vq_irqlock, flags);
 	}
+	pmctl = msm_cam_server_get_mctl(pcam->mctl_handle);
+	if (pmctl == NULL) {
+		pr_err("%s No mctl found\n", __func__);
+		buf->state = MSM_BUFFER_STATE_UNUSED;
+		return;
+	}
 	for (i = 0; i < vb->num_planes; i++) {
 		mem = vb2_plane_cookie(vb, i);
 		videobuf2_pmem_contig_user_put(mem, pmctl->client);
@@ -551,7 +552,8 @@
 	 * If mctl node doesnt have the instance, then
 	 * search in the user's video node */
 	if (pmctl->vfe_output_mode == VFE_OUTPUTS_MAIN_AND_THUMB
-		|| pmctl->vfe_output_mode == VFE_OUTPUTS_THUMB_AND_MAIN) {
+		|| pmctl->vfe_output_mode == VFE_OUTPUTS_THUMB_AND_MAIN
+		|| pmctl->vfe_output_mode == VFE_OUTPUTS_MAIN_AND_PREVIEW) {
 		if (pcam->mctl_node.dev_inst_map[img_mode]
 		&& is_buffer_queued(pcam, img_mode)) {
 			idx = pcam->mctl_node.dev_inst_map[img_mode]->my_index;
diff --git a/drivers/media/video/msm/sensors/msm_sensor.c b/drivers/media/video/msm/sensors/msm_sensor.c
index 72f3f3d..f687573 100644
--- a/drivers/media/video/msm/sensors/msm_sensor.c
+++ b/drivers/media/video/msm/sensors/msm_sensor.c
@@ -605,6 +605,781 @@
 	return 0;
 }
 
+static int32_t msm_sensor_init_flash_data(struct device_node *of_node,
+	struct  msm_camera_sensor_info *sensordata)
+{
+	int32_t rc = 0;
+	uint32_t val = 0;
+
+	sensordata->flash_data = kzalloc(sizeof(
+		struct msm_camera_sensor_flash_data), GFP_KERNEL);
+	if (!sensordata->flash_data) {
+		pr_err("%s failed %d\n", __func__, __LINE__);
+		return -ENOMEM;
+	}
+
+	rc = of_property_read_u32(of_node, "flash_type", &val);
+	CDBG("%s flash_type %d, rc %d\n", __func__, val, rc);
+	if (rc < 0) {
+		pr_err("%s failed %d\n", __func__, __LINE__);
+		goto ERROR;
+	}
+	sensordata->flash_data->flash_type = val;
+	return rc;
+ERROR:
+	kfree(sensordata->flash_data);
+	return rc;
+}
+
+static int32_t msm_sensor_init_vreg_data(struct device_node *of_node,
+	struct msm_camera_sensor_platform_info *pinfo)
+{
+	int32_t rc = 0, i = 0;
+	uint32_t count = 0;
+	uint32_t *val_array = NULL;
+
+	count = of_property_count_strings(of_node, "cam_vreg_name");
+	CDBG("%s cam_vreg_name count %d\n", __func__, count);
+
+	if (!count)
+		return 0;
+
+	pinfo->cam_vreg = kzalloc(sizeof(struct camera_vreg_t) * count,
+		GFP_KERNEL);
+	if (!pinfo->cam_vreg) {
+		pr_err("%s failed %d\n", __func__, __LINE__);
+		return -ENOMEM;
+	}
+
+	pinfo->num_vreg = count;
+	for (i = 0; i < count; i++) {
+		rc = of_property_read_string_index(of_node, "cam_vreg_name", i,
+			&pinfo->cam_vreg[i].reg_name);
+		CDBG("%s reg_name[%d] = %s\n", __func__, i,
+			pinfo->cam_vreg[i].reg_name);
+		if (rc < 0) {
+			pr_err("%s failed %d\n", __func__, __LINE__);
+			goto ERROR1;
+		}
+	}
+
+	val_array = kzalloc(sizeof(uint32_t) * count, GFP_KERNEL);
+	if (!val_array) {
+		pr_err("%s failed %d\n", __func__, __LINE__);
+		rc = -ENOMEM;
+		goto ERROR1;
+	}
+
+	rc = of_property_read_u32_array(of_node, "cam_vreg_type", val_array,
+		count);
+	if (rc < 0) {
+		pr_err("%s failed %d\n", __func__, __LINE__);
+		goto ERROR2;
+	}
+	for (i = 0; i < count; i++) {
+		pinfo->cam_vreg[i].type = val_array[i];
+		CDBG("%s cam_vreg[%d].type = %d\n", __func__, i,
+			pinfo->cam_vreg[i].type);
+	}
+
+	rc = of_property_read_u32_array(of_node, "cam_vreg_min_voltage",
+		val_array, count);
+	if (rc < 0) {
+		pr_err("%s failed %d\n", __func__, __LINE__);
+		goto ERROR2;
+	}
+	for (i = 0; i < count; i++) {
+		pinfo->cam_vreg[i].min_voltage = val_array[i];
+		CDBG("%s cam_vreg[%d].min_voltage = %d\n", __func__,
+			i, pinfo->cam_vreg[i].min_voltage);
+	}
+
+	rc = of_property_read_u32_array(of_node, "cam_vreg_max_voltage",
+		val_array, count);
+	if (rc < 0) {
+		pr_err("%s failed %d\n", __func__, __LINE__);
+		goto ERROR2;
+	}
+	for (i = 0; i < count; i++) {
+		pinfo->cam_vreg[i].max_voltage = val_array[i];
+		CDBG("%s cam_vreg[%d].max_voltage = %d\n", __func__,
+			i, pinfo->cam_vreg[i].max_voltage);
+	}
+
+	rc = of_property_read_u32_array(of_node, "cam_vreg_op_mode", val_array,
+		count);
+	if (rc < 0) {
+		pr_err("%s failed %d\n", __func__, __LINE__);
+		goto ERROR2;
+	}
+	for (i = 0; i < count; i++) {
+		pinfo->cam_vreg[i].op_mode = val_array[i];
+		CDBG("%s cam_vreg[%d].op_mode = %d\n", __func__, i,
+			pinfo->cam_vreg[i].op_mode);
+	}
+
+	kfree(val_array);
+	return rc;
+ERROR2:
+	kfree(val_array);
+ERROR1:
+	kfree(pinfo->cam_vreg);
+	pinfo->num_vreg = 0;
+	return rc;
+}
+
+static int32_t msm_sensor_init_gpio_common_tbl_data(struct device_node *of_node,
+	struct msm_camera_gpio_conf *gconf)
+{
+	int32_t rc = 0, i = 0;
+	uint32_t count = 0;
+	uint32_t *val_array = NULL;
+
+	if (!of_get_property(of_node, "gpio_common_tbl_num", &count))
+		return 0;
+
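+	/* of_get_property() gives the length in bytes; convert to u32 count */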
+	count /= sizeof(uint32_t);
+
+	if (!count)
+		return 0;
+
+	val_array = kzalloc(sizeof(uint32_t) * count, GFP_KERNEL);
+	if (!val_array) {
+		pr_err("%s failed %d\n", __func__, __LINE__);
+		return -ENOMEM;
+	}
+
+	gconf->cam_gpio_common_tbl = kzalloc(sizeof(struct gpio) * count,
+		GFP_KERNEL);
+	if (!gconf->cam_gpio_common_tbl) {
+		pr_err("%s failed %d\n", __func__, __LINE__);
+		rc = -ENOMEM;
+		goto ERROR1;
+	}
+	gconf->cam_gpio_common_tbl_size = count;
+
+	rc = of_property_read_u32_array(of_node, "gpio_common_tbl_num",
+		val_array, count);
+	if (rc < 0) {
+		pr_err("%s failed %d\n", __func__, __LINE__);
+		goto ERROR2;
+	}
+	for (i = 0; i < count; i++) {
+		gconf->cam_gpio_common_tbl[i].gpio = val_array[i];
+		CDBG("%s cam_gpio_common_tbl[%d].gpio = %d\n", __func__, i,
+			gconf->cam_gpio_common_tbl[i].gpio);
+	}
+
+	rc = of_property_read_u32_array(of_node, "gpio_common_tbl_flags",
+		val_array, count);
+	if (rc < 0) {
+		pr_err("%s failed %d\n", __func__, __LINE__);
+		goto ERROR2;
+	}
+	for (i = 0; i < count; i++) {
+		gconf->cam_gpio_common_tbl[i].flags = val_array[i];
+		CDBG("%s cam_gpio_common_tbl[%d].flags = %ld\n", __func__, i,
+			gconf->cam_gpio_common_tbl[i].flags);
+	}
+
+	for (i = 0; i < count; i++) {
+		rc = of_property_read_string_index(of_node,
+			"gpio_common_tbl_label", i,
+			&gconf->cam_gpio_common_tbl[i].label);
+		CDBG("%s cam_gpio_common_tbl[%d].label = %s\n", __func__, i,
+			gconf->cam_gpio_common_tbl[i].label);
+		if (rc < 0) {
+			pr_err("%s failed %d\n", __func__, __LINE__);
+			goto ERROR2;
+		}
+	}
+
+	kfree(val_array);
+	return rc;
+
+ERROR2:
+	kfree(gconf->cam_gpio_common_tbl);
+ERROR1:
+	kfree(val_array);
+	gconf->cam_gpio_common_tbl_size = 0;
+	return rc;
+}
+
+static int32_t msm_sensor_init_gpio_req_tbl_data(struct device_node *of_node,
+	struct msm_camera_gpio_conf *gconf)
+{
+	int32_t rc = 0, i = 0;
+	uint32_t count = 0;
+	uint32_t *val_array = NULL;
+
+	if (!of_get_property(of_node, "gpio_req_tbl_num", &count))
+		return 0;
+
+	count /= sizeof(uint32_t);
+
+	if (!count)
+		return 0;
+
+	val_array = kzalloc(sizeof(uint32_t) * count, GFP_KERNEL);
+	if (!val_array) {
+		pr_err("%s failed %d\n", __func__, __LINE__);
+		return -ENOMEM;
+	}
+
+	gconf->cam_gpio_req_tbl = kzalloc(sizeof(struct gpio) * count,
+		GFP_KERNEL);
+	if (!gconf->cam_gpio_req_tbl) {
+		pr_err("%s failed %d\n", __func__, __LINE__);
+		rc = -ENOMEM;
+		goto ERROR1;
+	}
+	gconf->cam_gpio_req_tbl_size = count;
+
+	rc = of_property_read_u32_array(of_node, "gpio_req_tbl_num",
+		val_array, count);
+	if (rc < 0) {
+		pr_err("%s failed %d\n", __func__, __LINE__);
+		goto ERROR2;
+	}
+	for (i = 0; i < count; i++) {
+		gconf->cam_gpio_req_tbl[i].gpio = val_array[i];
+		CDBG("%s cam_gpio_req_tbl[%d].gpio = %d\n", __func__, i,
+			gconf->cam_gpio_req_tbl[i].gpio);
+	}
+
+	rc = of_property_read_u32_array(of_node, "gpio_req_tbl_flags",
+		val_array, count);
+	if (rc < 0) {
+		pr_err("%s failed %d\n", __func__, __LINE__);
+		goto ERROR2;
+	}
+	for (i = 0; i < count; i++) {
+		gconf->cam_gpio_req_tbl[i].flags = val_array[i];
+		CDBG("%s cam_gpio_req_tbl[%d].flags = %ld\n", __func__, i,
+			gconf->cam_gpio_req_tbl[i].flags);
+	}
+
+	for (i = 0; i < count; i++) {
+		rc = of_property_read_string_index(of_node,
+			"gpio_req_tbl_label", i,
+			&gconf->cam_gpio_req_tbl[i].label);
+		CDBG("%s cam_gpio_req_tbl[%d].label = %s\n", __func__, i,
+			gconf->cam_gpio_req_tbl[i].label);
+		if (rc < 0) {
+			pr_err("%s failed %d\n", __func__, __LINE__);
+			goto ERROR2;
+		}
+	}
+
+	kfree(val_array);
+	return rc;
+
+ERROR2:
+	kfree(gconf->cam_gpio_req_tbl);
+ERROR1:
+	kfree(val_array);
+	gconf->cam_gpio_req_tbl_size = 0;
+	return rc;
+}
+
+static int32_t msm_sensor_init_gpio_set_tbl_data(struct device_node *of_node,
+	struct msm_camera_gpio_conf *gconf)
+{
+	int32_t rc = 0, i = 0;
+	uint32_t count = 0;
+	uint32_t *val_array = NULL;
+
+	if (!of_get_property(of_node, "gpio_set_tbl_num", &count))
+		return 0;
+
+	count /= sizeof(uint32_t);
+
+	if (!count)
+		return 0;
+
+	val_array = kzalloc(sizeof(uint32_t) * count, GFP_KERNEL);
+	if (!val_array) {
+		pr_err("%s failed %d\n", __func__, __LINE__);
+		return -ENOMEM;
+	}
+
+	gconf->cam_gpio_set_tbl = kzalloc(sizeof(struct msm_gpio_set_tbl) *
+		count, GFP_KERNEL);
+	if (!gconf->cam_gpio_set_tbl) {
+		pr_err("%s failed %d\n", __func__, __LINE__);
+		rc = -ENOMEM;
+		goto ERROR1;
+	}
+	gconf->cam_gpio_set_tbl_size = count;
+
+	rc = of_property_read_u32_array(of_node, "gpio_set_tbl_num",
+		val_array, count);
+	if (rc < 0) {
+		pr_err("%s failed %d\n", __func__, __LINE__);
+		goto ERROR2;
+	}
+	for (i = 0; i < count; i++) {
+		gconf->cam_gpio_set_tbl[i].gpio = val_array[i];
+		CDBG("%s cam_gpio_set_tbl[%d].gpio = %d\n", __func__, i,
+			gconf->cam_gpio_set_tbl[i].gpio);
+	}
+
+	rc = of_property_read_u32_array(of_node, "gpio_set_tbl_flags",
+		val_array, count);
+	if (rc < 0) {
+		pr_err("%s failed %d\n", __func__, __LINE__);
+		goto ERROR2;
+	}
+	for (i = 0; i < count; i++) {
+		gconf->cam_gpio_set_tbl[i].flags = val_array[i];
+		CDBG("%s cam_gpio_set_tbl[%d].flags = %ld\n", __func__, i,
+			gconf->cam_gpio_set_tbl[i].flags);
+	}
+
+	rc = of_property_read_u32_array(of_node, "gpio_set_tbl_delay",
+		val_array, count);
+	if (rc < 0) {
+		pr_err("%s failed %d\n", __func__, __LINE__);
+		goto ERROR2;
+	}
+	for (i = 0; i < count; i++) {
+		gconf->cam_gpio_set_tbl[i].delay = val_array[i];
+		CDBG("%s cam_gpio_set_tbl[%d].delay = %d\n", __func__, i,
+			gconf->cam_gpio_set_tbl[i].delay);
+	}
+
+	kfree(val_array);
+	return rc;
+
+ERROR2:
+	kfree(gconf->cam_gpio_set_tbl);
+ERROR1:
+	kfree(val_array);
+	gconf->cam_gpio_set_tbl_size = 0;
+	return rc;
+}
+
+static int32_t msm_sensor_init_gpio_tlmm_tbl_data(struct device_node *of_node,
+	struct msm_camera_gpio_conf *gconf)
+{
+	int32_t rc = 0, i = 0;
+	uint32_t count = 0;
+	uint32_t *val_array = NULL;
+	struct gpio_tlmm_cfg *tlmm_cfg = NULL;
+
+	if (!of_get_property(of_node, "gpio_tlmm_table_num", &count))
+		return 0;
+
+	count /= sizeof(uint32_t);
+
+	if (!count)
+		return 0;
+
+	val_array = kzalloc(sizeof(uint32_t) * count, GFP_KERNEL);
+	if (!val_array) {
+		pr_err("%s failed %d\n", __func__, __LINE__);
+		return -ENOMEM;
+	}
+
+	tlmm_cfg = kzalloc(sizeof(struct gpio_tlmm_cfg) * count, GFP_KERNEL);
+	if (!tlmm_cfg) {
+		pr_err("%s failed %d\n", __func__, __LINE__);
+		rc = -ENOMEM;
+		goto ERROR1;
+	}
+
+	gconf->camera_off_table = kzalloc(sizeof(uint32_t) * count, GFP_KERNEL);
+	if (!gconf->camera_off_table) {
+		pr_err("%s failed %d\n", __func__, __LINE__);
+		rc = -ENOMEM;
+		goto ERROR2;
+	}
+	gconf->camera_off_table_size = count;
+
+	gconf->camera_on_table = kzalloc(sizeof(uint32_t) * count, GFP_KERNEL);
+	if (!gconf->camera_on_table) {
+		pr_err("%s failed %d\n", __func__, __LINE__);
+		rc = -ENOMEM;
+		goto ERROR3;
+	}
+	gconf->camera_on_table_size = count;
+
+	rc = of_property_read_u32_array(of_node, "gpio_tlmm_table_num",
+		val_array, count);
+	if (rc < 0) {
+		pr_err("%s failed %d\n", __func__, __LINE__);
+		goto ERROR4;
+	}
+	for (i = 0; i < count; i++) {
+		tlmm_cfg[i].gpio = val_array[i];
+		CDBG("%s tlmm_cfg[%d].gpio = %d\n", __func__, i,
+			tlmm_cfg[i].gpio);
+	}
+
+	rc = of_property_read_u32_array(of_node, "gpio_tlmm_table_dir",
+		val_array, count);
+	if (rc < 0) {
+		pr_err("%s failed %d\n", __func__, __LINE__);
+		goto ERROR4;
+	}
+	for (i = 0; i < count; i++) {
+		tlmm_cfg[i].dir = val_array[i];
+		CDBG("%s tlmm_cfg[%d].dir = %d\n", __func__, i,
+			tlmm_cfg[i].dir);
+	}
+
+	rc = of_property_read_u32_array(of_node, "gpio_tlmm_table_pull",
+		val_array, count);
+	if (rc < 0) {
+		pr_err("%s failed %d\n", __func__, __LINE__);
+		goto ERROR4;
+	}
+	for (i = 0; i < count; i++) {
+		tlmm_cfg[i].pull = val_array[i];
+		CDBG("%s tlmm_cfg[%d].pull = %d\n", __func__, i,
+			tlmm_cfg[i].pull);
+	}
+
+	rc = of_property_read_u32_array(of_node, "gpio_tlmm_table_drvstr",
+		val_array, count);
+	if (rc < 0) {
+		pr_err("%s failed %d\n", __func__, __LINE__);
+		goto ERROR4;
+	}
+	for (i = 0; i < count; i++) {
+		tlmm_cfg[i].drvstr = val_array[i];
+		CDBG("%s tlmm_cfg[%d].drvstr = %d\n", __func__, i,
+			tlmm_cfg[i].drvstr);
+	}
+
+	for (i = 0; i < count; i++) {
+		gconf->camera_off_table[i] = GPIO_CFG(tlmm_cfg[i].gpio,
+			0, tlmm_cfg[i].dir, tlmm_cfg[i].pull,
+			tlmm_cfg[i].drvstr);
+		gconf->camera_on_table[i] = GPIO_CFG(tlmm_cfg[i].gpio,
+			1, tlmm_cfg[i].dir, tlmm_cfg[i].pull,
+			tlmm_cfg[i].drvstr);
+	}
+
+	kfree(tlmm_cfg);
+	kfree(val_array);
+	return rc;
+
+ERROR4:
+	kfree(gconf->camera_on_table);
+ERROR3:
+	kfree(gconf->camera_off_table);
+ERROR2:
+	kfree(tlmm_cfg);
+ERROR1:
+	kfree(val_array);
+	gconf->camera_off_table_size = 0;
+	gconf->camera_on_table_size = 0;
+	return rc;
+}
+
+static int32_t msm_sensor_init_csi_data(struct device_node *of_node,
+	struct  msm_camera_sensor_info *sensordata)
+{
+	int32_t rc = 0, i = 0;
+	uint32_t count = 0, val = 0;
+	uint32_t *val_array = NULL;
+	struct msm_camera_sensor_platform_info *pinfo =
+		sensordata->sensor_platform_info;
+
+	rc = of_property_read_u32(of_node, "csi_if", &count);
+	CDBG("%s csi_if %d, rc %d\n", __func__, count, rc);
+	if (rc < 0 || !count)
+		return rc;
+	sensordata->csi_if = count;
+
+	sensordata->pdata = kzalloc(sizeof(
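+	/* One platform-data entry per CSI interface (csi_if) */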
+		struct msm_camera_device_platform_data) * count, GFP_KERNEL);
+	if (!sensordata->pdata) {
+		pr_err("%s failed %d\n", __func__, __LINE__);
+		return -ENOMEM;
+	}
+
+	val_array = kzalloc(sizeof(uint32_t) * count, GFP_KERNEL);
+	if (!val_array) {
+		pr_err("%s failed %d\n", __func__, __LINE__);
+		rc = -ENOMEM;
+		goto ERROR1;
+	}
+
+	rc = of_property_read_u32_array(of_node, "csid_core", val_array, count);
+	if (rc < 0) {
+		pr_err("%s failed %d\n", __func__, __LINE__);
+		goto ERROR2;
+	}
+	for (i = 0; i < count; i++) {
+		sensordata->pdata[i].csid_core = val_array[i];
+		CDBG("%s csid_core[%d].csid_core = %d\n", __func__, i,
+			sensordata->pdata[i].csid_core);
+	}
+
+	rc = of_property_read_u32_array(of_node, "is_vpe", val_array, count);
+	if (rc < 0) {
+		pr_err("%s failed %d\n", __func__, __LINE__);
+		goto ERROR2;
+	}
+	for (i = 0; i < count; i++) {
+		sensordata->pdata[i].is_vpe = val_array[i];
+		CDBG("%s csid_core[%d].is_vpe = %d\n", __func__, i,
+			sensordata->pdata[i].is_vpe);
+	}
+
+	pinfo->csi_lane_params = kzalloc(
+		sizeof(struct msm_camera_csi_lane_params), GFP_KERNEL);
+	if (!pinfo->csi_lane_params) {
+		pr_err("%s failed %d\n", __func__, __LINE__);
+		rc = -ENOMEM;
+		goto ERROR2;
+	}
+
+	rc = of_property_read_u32(of_node, "csi_lane_assign", &val);
+	CDBG("%s csi_lane_assign %x, rc %d\n", __func__, val, rc);
+	if (rc < 0) {
+		pr_err("%s failed %d\n", __func__, __LINE__);
+		goto ERROR3;
+	}
+	pinfo->csi_lane_params->csi_lane_assign = val;
+
+	rc = of_property_read_u32(of_node, "csi_lane_mask", &val);
+	CDBG("%s csi_lane_mask %x, rc %d\n", __func__, val, rc);
+	if (rc < 0) {
+		pr_err("%s failed %d\n", __func__, __LINE__);
+		goto ERROR3;
+	}
+	pinfo->csi_lane_params->csi_lane_mask = val;
+
+	kfree(val_array);
+	return rc;
+ERROR3:
+	kfree(pinfo->csi_lane_params);
+ERROR2:
+	kfree(val_array);
+ERROR1:
+	kfree(sensordata->pdata);
+	sensordata->csi_if = 0;
+	return rc;
+}
+static int32_t msm_sensor_init_actuator_data(struct device_node *of_node,
+	struct  msm_camera_sensor_info *sensordata)
+{
+	int32_t rc = 0;
+	uint32_t val = 0;
+
+	rc = of_property_read_u32(of_node, "actuator_cam_name", &val);
+	CDBG("%s actuator_cam_name %d, rc %d\n", __func__, val, rc);
+	if (rc < 0)
+		return 0;
+
+	sensordata->actuator_info = kzalloc(sizeof(struct msm_actuator_info),
+		GFP_KERNEL);
+	if (!sensordata->actuator_info) {
+		pr_err("%s failed %d\n", __func__, __LINE__);
+		rc = -ENOMEM;
+		goto ERROR;
+	}
+
+	sensordata->actuator_info->cam_name = val;
+
+	rc = of_property_read_u32(of_node, "actuator_vcm_pwd", &val);
+	CDBG("%s actuator_vcm_pwd %d, rc %d\n", __func__, val, rc);
+	if (!rc)
+		sensordata->actuator_info->vcm_pwd = val;
+
+	rc = of_property_read_u32(of_node, "actuator_vcm_enable", &val);
+	CDBG("%s actuator_vcm_enable %d, rc %d\n", __func__, val, rc);
+	if (!rc)
+		sensordata->actuator_info->vcm_enable = val;
+
+	return 0;
+ERROR:
+	return rc;
+}
+
+static int32_t msm_sensor_init_sensor_data(struct platform_device *pdev,
+	struct msm_sensor_ctrl_t *s_ctrl)
+{
+	int32_t rc = 0;
+	uint32_t val = 0;
+	struct device_node *of_node = pdev->dev.of_node;
+	struct msm_camera_sensor_platform_info *pinfo = NULL;
+	struct msm_camera_gpio_conf *gconf = NULL;
+	struct msm_camera_sensor_info *sensordata = NULL;
+
+	s_ctrl->sensordata = kzalloc(sizeof(struct msm_camera_sensor_info),
+		GFP_KERNEL);
+	if (!s_ctrl->sensordata) {
+		pr_err("%s failed %d\n", __func__, __LINE__);
+		return -ENOMEM;
+	}
+
+	sensordata = s_ctrl->sensordata;
+	rc = of_property_read_string(of_node, "sensor_name",
+		&sensordata->sensor_name);
+	CDBG("%s sensor_name %s, rc %d\n", __func__,
+		sensordata->sensor_name, rc);
+	if (rc < 0) {
+		pr_err("%s failed %d\n", __func__, __LINE__);
+		goto ERROR1;
+	}
+
+	rc = of_property_read_u32(of_node, "camera_type", &val);
+	CDBG("%s camera_type %d, rc %d\n", __func__, val, rc);
+	if (rc < 0) {
+		pr_err("%s failed %d\n", __func__, __LINE__);
+		goto ERROR1;
+	}
+	sensordata->camera_type = val;
+
+	rc = of_property_read_u32(of_node, "sensor_type", &val);
+	CDBG("%s sensor_type %d, rc %d\n", __func__, val, rc);
+	if (rc < 0) {
+		pr_err("%s failed %d\n", __func__, __LINE__);
+		goto ERROR1;
+	}
+	sensordata->sensor_type = val;
+
+	rc = msm_sensor_init_flash_data(of_node, sensordata);
+	if (rc < 0) {
+		pr_err("%s failed %d\n", __func__, __LINE__);
+		goto ERROR1;
+	}
+
+	sensordata->sensor_platform_info = kzalloc(sizeof(
+		struct msm_camera_sensor_platform_info), GFP_KERNEL);
+	if (!sensordata->sensor_platform_info) {
+		pr_err("%s failed %d\n", __func__, __LINE__);
+		rc = -ENOMEM;
+		goto ERROR1;
+	}
+
+	pinfo = sensordata->sensor_platform_info;
+
+	rc = of_property_read_u32(of_node, "mount_angle", &pinfo->mount_angle);
+	CDBG("%s mount_angle %d, rc %d\n", __func__, pinfo->mount_angle, rc);
+	if (rc < 0) {
+		/* Set default mount angle */
+		pinfo->mount_angle = 0;
+		rc = 0;
+	}
+
+	rc = msm_sensor_init_csi_data(of_node, sensordata);
+	if (rc < 0) {
+		pr_err("%s failed %d\n", __func__, __LINE__);
+		goto ERROR2;
+	}
+
+	rc = msm_sensor_init_vreg_data(of_node, pinfo);
+	if (rc < 0) {
+		pr_err("%s failed %d\n", __func__, __LINE__);
+		goto ERROR3;
+	}
+
+	pinfo->gpio_conf = kzalloc(sizeof(struct msm_camera_gpio_conf),
+		GFP_KERNEL);
+	if (!pinfo->gpio_conf) {
+		pr_err("%s failed %d\n", __func__, __LINE__);
+		rc = -ENOMEM;
+		goto ERROR4;
+	}
+	gconf = pinfo->gpio_conf;
+	rc = of_property_read_u32(of_node, "gpio_no_mux", &gconf->gpio_no_mux);
+	CDBG("%s gconf->gpio_no_mux %d, rc %d\n", __func__,
+		gconf->gpio_no_mux, rc);
+	if (rc < 0) {
+		pr_err("%s failed %d\n", __func__, __LINE__);
+		goto ERROR5;
+	}
+
+	rc = msm_sensor_init_gpio_common_tbl_data(of_node, gconf);
+	if (rc < 0) {
+		pr_err("%s failed %d\n", __func__, __LINE__);
+		goto ERROR5;
+	}
+
+	rc = msm_sensor_init_gpio_req_tbl_data(of_node, gconf);
+	if (rc < 0) {
+		pr_err("%s failed %d\n", __func__, __LINE__);
+		goto ERROR6;
+	}
+
+	rc = msm_sensor_init_gpio_set_tbl_data(of_node, gconf);
+	if (rc < 0) {
+		pr_err("%s failed %d\n", __func__, __LINE__);
+		goto ERROR7;
+	}
+
+	rc = msm_sensor_init_gpio_tlmm_tbl_data(of_node, gconf);
+	if (rc < 0) {
+		pr_err("%s failed %d\n", __func__, __LINE__);
+		goto ERROR8;
+	}
+
+	rc = msm_sensor_init_actuator_data(of_node, sensordata);
+	if (rc < 0) {
+		pr_err("%s failed %d\n", __func__, __LINE__);
+		goto ERROR9;
+	}
+
+	return rc;
+
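+	/* Labels fall through to free everything allocated before the failure */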
+ERROR9:
+	kfree(s_ctrl->sensordata->sensor_platform_info->gpio_conf->
+		camera_on_table);
+	kfree(s_ctrl->sensordata->sensor_platform_info->gpio_conf->
+		camera_off_table);
+ERROR8:
+	kfree(s_ctrl->sensordata->sensor_platform_info->gpio_conf->
+		cam_gpio_set_tbl);
+ERROR7:
+	kfree(s_ctrl->sensordata->sensor_platform_info->gpio_conf->
+		cam_gpio_req_tbl);
+ERROR6:
+	kfree(s_ctrl->sensordata->sensor_platform_info->gpio_conf->
+		cam_gpio_common_tbl);
+ERROR5:
+	kfree(s_ctrl->sensordata->sensor_platform_info->gpio_conf);
+ERROR4:
+	kfree(s_ctrl->sensordata->sensor_platform_info->cam_vreg);
+ERROR3:
+	kfree(s_ctrl->sensordata->sensor_platform_info->csi_lane_params);
+	kfree(s_ctrl->sensordata->pdata);
+ERROR2:
+	kfree(s_ctrl->sensordata->sensor_platform_info);
+	kfree(s_ctrl->sensordata->flash_data);
+ERROR1:
+	kfree(s_ctrl->sensordata);
+	return rc;
+}
+
+int32_t msm_sensor_free_sensor_data(struct msm_sensor_ctrl_t *s_ctrl)
+{
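+	/* DT-allocated sensor data exists only when probed as a platform device */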
+	if (!s_ctrl->pdev)
+		return 0;
+	kfree(s_ctrl->sensordata->sensor_platform_info->gpio_conf->
+		camera_on_table);
+	kfree(s_ctrl->sensordata->sensor_platform_info->gpio_conf->
+		camera_off_table);
+	kfree(s_ctrl->sensordata->sensor_platform_info->gpio_conf->
+		cam_gpio_set_tbl);
+	kfree(s_ctrl->sensordata->sensor_platform_info->gpio_conf->
+		cam_gpio_req_tbl);
+	kfree(s_ctrl->sensordata->sensor_platform_info->gpio_conf->
+		cam_gpio_common_tbl);
+	kfree(s_ctrl->sensordata->sensor_platform_info->gpio_conf);
+	kfree(s_ctrl->sensordata->sensor_platform_info->cam_vreg);
+	kfree(s_ctrl->sensordata->sensor_platform_info->csi_lane_params);
+	kfree(s_ctrl->sensordata->pdata);
+	kfree(s_ctrl->sensordata->sensor_platform_info);
+	kfree(s_ctrl->sensordata->flash_data);
+	kfree(s_ctrl->sensordata);
+	return 0;
+}
+
 int32_t msm_sensor_power_up(struct msm_sensor_ctrl_t *s_ctrl)
 {
 	int32_t rc = 0;
@@ -666,8 +1441,21 @@
 		data->sensor_platform_info->i2c_conf->use_i2c_mux)
 		msm_sensor_enable_i2c_mux(data->sensor_platform_info->i2c_conf);
 
+	if (s_ctrl->sensor_i2c_client->cci_client) {
+		rc = msm_sensor_cci_util(s_ctrl->sensor_i2c_client,
+			MSM_CCI_INIT);
+		if (rc < 0) {
+			pr_err("%s cci_init failed\n", __func__);
+			goto cci_init_failed;
+		}
+	}
 	return rc;
 
+cci_init_failed:
+	if (data->sensor_platform_info->i2c_conf &&
+		data->sensor_platform_info->i2c_conf->use_i2c_mux)
+		msm_sensor_disable_i2c_mux(
+			data->sensor_platform_info->i2c_conf);
 enable_clk_failed:
 		msm_camera_config_gpio_table(data, 0);
 config_gpio_failed:
@@ -692,6 +1480,11 @@
 {
 	struct msm_camera_sensor_info *data = s_ctrl->sensordata;
 	CDBG("%s\n", __func__);
+	if (s_ctrl->sensor_i2c_client->cci_client) {
+		msm_sensor_cci_util(s_ctrl->sensor_i2c_client,
+			MSM_CCI_RELEASE);
+	}
+
 	if (data->sensor_platform_info->i2c_conf &&
 		data->sensor_platform_info->i2c_conf->use_i2c_mux)
 		msm_sensor_disable_i2c_mux(
@@ -729,7 +1522,8 @@
 		return rc;
 	}
 
-	CDBG("msm_sensor id: %d\n", chipid);
+	CDBG("%s msm_sensor id: %x, exp id: %x\n", __func__, chipid,
+		s_ctrl->sensor_id_info->sensor_id);
 	if (chipid != s_ctrl->sensor_id_info->sensor_id) {
 		pr_err("msm_sensor_match_id chip id doesnot match\n");
 		return -ENODEV;
@@ -810,6 +1604,90 @@
 	return rc;
 }
 
+static int msm_sensor_subdev_match_core(struct device *dev, void *data)
+{
+	int core_index = (int)data;
+	struct platform_device *pdev = to_platform_device(dev);
+	CDBG("%s cci pdev %p\n", __func__, pdev);
+	if (pdev->id == core_index)
+		return 1;
+	else
+		return 0;
+}
+
+int32_t msm_sensor_platform_probe(struct platform_device *pdev, void *data)
+{
+	int32_t rc = 0;
+	struct msm_sensor_ctrl_t *s_ctrl = (struct msm_sensor_ctrl_t *)data;
+	struct device_driver *driver;
+	struct device *dev;
+	s_ctrl->pdev = pdev;
+	CDBG("%s called data %p\n", __func__, data);
+	if (pdev->dev.of_node) {
+		rc = msm_sensor_init_sensor_data(pdev, s_ctrl);
+		if (rc < 0) {
+			pr_err("%s failed line %d\n", __func__, __LINE__);
+			return rc;
+		}
+	}
+	s_ctrl->sensor_i2c_client->cci_client = kzalloc(sizeof(
+		struct msm_camera_cci_client), GFP_KERNEL);
+	if (!s_ctrl->sensor_i2c_client->cci_client) {
+		pr_err("%s failed line %d\n", __func__, __LINE__);
+		return -ENOMEM;
+	}
+	driver = driver_find(MSM_CCI_DRV_NAME, &platform_bus_type);
+	if (!driver) {
+		pr_err("%s failed line %d\n", __func__, __LINE__);
+		return -ENODEV;
+	}
+
+	dev = driver_find_device(driver, NULL, 0,
+				msm_sensor_subdev_match_core);
+	if (!dev) {
+		pr_err("%s failed line %d\n", __func__, __LINE__);
+		return -ENODEV;
+	}
+	s_ctrl->sensor_i2c_client->cci_client->cci_subdev =
+		dev_get_drvdata(dev);
+	CDBG("%s sd %p\n", __func__,
+		s_ctrl->sensor_i2c_client->cci_client->cci_subdev);
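+	/* Defaults: CCI master 0; sid is sensor_i2c_addr shifted to its 7-bit form */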
+	s_ctrl->sensor_i2c_client->cci_client->cci_i2c_master = MASTER_0;
+	s_ctrl->sensor_i2c_client->cci_client->sid =
+		s_ctrl->sensor_i2c_addr >> 1;
+	s_ctrl->sensor_i2c_client->cci_client->retries = 0;
+	s_ctrl->sensor_i2c_client->cci_client->id_map = 0;
+
+	rc = s_ctrl->func_tbl->sensor_power_up(s_ctrl);
+	if (rc < 0) {
+		pr_err("%s %s power up failed\n", __func__,
+			pdev->id_entry->name);
+		return rc;
+	}
+
+	if (s_ctrl->func_tbl->sensor_match_id)
+		rc = s_ctrl->func_tbl->sensor_match_id(s_ctrl);
+	else
+		rc = msm_sensor_match_id(s_ctrl);
+	if (rc < 0)
+		goto probe_fail;
+
+	v4l2_subdev_init(&s_ctrl->sensor_v4l2_subdev,
+		s_ctrl->sensor_v4l2_subdev_ops);
+	snprintf(s_ctrl->sensor_v4l2_subdev.name,
+		sizeof(s_ctrl->sensor_v4l2_subdev.name), "%s",
+		s_ctrl->sensordata->sensor_name);
+	v4l2_set_subdevdata(&s_ctrl->sensor_v4l2_subdev, pdev);
+	msm_sensor_register(&s_ctrl->sensor_v4l2_subdev);
+
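+	/* Probe only verifies the sensor; power it down again in either case */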
+	goto power_down;
+probe_fail:
+	pr_err("%s %s probe failed\n", __func__, pdev->id_entry->name);
+power_down:
+	s_ctrl->func_tbl->sensor_power_down(s_ctrl);
+	return rc;
+}
+
 int32_t msm_sensor_power(struct v4l2_subdev *sd, int on)
 {
 	int rc = 0;
diff --git a/drivers/media/video/msm/sensors/msm_sensor.h b/drivers/media/video/msm/sensors/msm_sensor.h
index a3ddaa7..dc394e1 100644
--- a/drivers/media/video/msm/sensors/msm_sensor.h
+++ b/drivers/media/video/msm/sensors/msm_sensor.h
@@ -22,6 +22,9 @@
 #include <linux/slab.h>
 #include <linux/types.h>
 #include <linux/uaccess.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/gpio.h>
 #include <mach/camera.h>
 #include <mach/gpio.h>
 #include <media/msm_camera.h>
@@ -35,6 +38,13 @@
 #define MSM_SENSOR_MCLK_16HZ 16000000
 #define MSM_SENSOR_MCLK_24HZ 24000000
 
+struct gpio_tlmm_cfg {
+	uint32_t gpio;
+	uint32_t dir;
+	uint32_t pull;
+	uint32_t drvstr;
+};
+
 enum msm_sensor_reg_update {
 	/* Sensor egisters that need to be updated during initialization */
 	MSM_SENSOR_REG_INIT,
@@ -151,6 +161,7 @@
 	struct i2c_client *msm_sensor_client;
 	struct i2c_driver *sensor_i2c_driver;
 	struct msm_camera_i2c_client *sensor_i2c_client;
+	struct platform_device *pdev;
 	uint16_t sensor_i2c_addr;
 
 	struct msm_sensor_output_reg_addr_t *sensor_output_reg_addr;
@@ -212,6 +223,9 @@
 int32_t msm_sensor_match_id(struct msm_sensor_ctrl_t *s_ctrl);
 int msm_sensor_i2c_probe(struct i2c_client *client,
 	const struct i2c_device_id *id);
+
+int32_t msm_sensor_platform_probe(struct platform_device *pdev, void *data);
+
 int32_t msm_sensor_power(struct v4l2_subdev *sd, int on);
 
 int32_t msm_sensor_v4l2_s_ctrl(struct v4l2_subdev *sd,
@@ -256,6 +270,8 @@
 int32_t msm_sensor_get_csi_params(struct msm_sensor_ctrl_t *s_ctrl,
 		struct csi_lane_params_t *sensor_output_info);
 
+int32_t msm_sensor_free_sensor_data(struct msm_sensor_ctrl_t *s_ctrl);
+
 struct msm_sensor_ctrl_t *get_sctrl(struct v4l2_subdev *sd);
 
 #define VIDIOC_MSM_SENSOR_CFG \
diff --git a/drivers/media/video/msm/sensors/ov2720.c b/drivers/media/video/msm/sensors/ov2720.c
index e4c5061..e08cd0a 100644
--- a/drivers/media/video/msm/sensors/ov2720.c
+++ b/drivers/media/video/msm/sensors/ov2720.c
@@ -749,11 +749,51 @@
 	.addr_type = MSM_CAMERA_I2C_WORD_ADDR,
 };
 
+static const struct of_device_id ov2720_dt_match[] = {
+	{.compatible = "qcom,ov2720", .data = &ov2720_s_ctrl},
+	{}
+};
+
+MODULE_DEVICE_TABLE(of, ov2720_dt_match);
+
+static struct platform_driver ov2720_platform_driver = {
+	.driver = {
+		.name = "qcom,ov2720",
+		.owner = THIS_MODULE,
+		.of_match_table = ov2720_dt_match,
+	},
+};
+
+static int32_t ov2720_platform_probe(struct platform_device *pdev)
+{
+	int32_t rc = 0;
+	const struct of_device_id *match;
+	match = of_match_device(ov2720_dt_match, &pdev->dev);
+	rc = msm_sensor_platform_probe(pdev, match->data);
+	return rc;
+}
+
 static int __init msm_sensor_init_module(void)
 {
+	int32_t rc = 0;
+	rc = platform_driver_probe(&ov2720_platform_driver,
+		ov2720_platform_probe);
+	if (!rc)
+		return rc;
 	return i2c_add_driver(&ov2720_i2c_driver);
 }
 
+static void __exit msm_sensor_exit_module(void)
+{
+	if (ov2720_s_ctrl.pdev) {
+		msm_sensor_free_sensor_data(&ov2720_s_ctrl);
+		platform_driver_unregister(&ov2720_platform_driver);
+	} else {
+		i2c_del_driver(&ov2720_i2c_driver);
+	}
+}
+
 static struct v4l2_subdev_core_ops ov2720_subdev_core_ops = {
 	.ioctl = msm_sensor_subdev_ioctl,
 	.s_power = msm_sensor_power,
@@ -824,6 +864,7 @@
 };
 
 module_init(msm_sensor_init_module);
+module_exit(msm_sensor_exit_module);
 MODULE_DESCRIPTION("Omnivision 2MP Bayer sensor driver");
 MODULE_LICENSE("GPL v2");
 
diff --git a/drivers/media/video/msm/sensors/s5k3l1yx.c b/drivers/media/video/msm/sensors/s5k3l1yx.c
index f480a1c..64b004e 100644
--- a/drivers/media/video/msm/sensors/s5k3l1yx.c
+++ b/drivers/media/video/msm/sensors/s5k3l1yx.c
@@ -618,11 +618,50 @@
 	.addr_type = MSM_CAMERA_I2C_WORD_ADDR,
 };
 
+static const struct of_device_id s5k3l1yx_dt_match[] = {
+	{.compatible = "qcom,s5k3l1yx", .data = &s5k3l1yx_s_ctrl},
+	{}
+};
+
+MODULE_DEVICE_TABLE(of, s5k3l1yx_dt_match);
+
+static struct platform_driver s5k3l1yx_platform_driver = {
+	.driver = {
+		.name = "qcom,s5k3l1yx",
+		.owner = THIS_MODULE,
+		.of_match_table = s5k3l1yx_dt_match,
+	},
+};
+
+static int32_t s5k3l1yx_platform_probe(struct platform_device *pdev)
+{
+	int32_t rc = 0;
+	const struct of_device_id *match;
+	match = of_match_device(s5k3l1yx_dt_match, &pdev->dev);
+	rc = msm_sensor_platform_probe(pdev, match->data);
+	return rc;
+}
+
 static int __init msm_sensor_init_module(void)
 {
+	int32_t rc = 0;
+	rc = platform_driver_probe(&s5k3l1yx_platform_driver,
+		s5k3l1yx_platform_probe);
+	if (!rc)
+		return rc;
 	return i2c_add_driver(&s5k3l1yx_i2c_driver);
 }
 
+static void __exit msm_sensor_exit_module(void)
+{
+	if (s5k3l1yx_s_ctrl.pdev) {
+		msm_sensor_free_sensor_data(&s5k3l1yx_s_ctrl);
+		platform_driver_unregister(&s5k3l1yx_platform_driver);
+	} else {
+		i2c_del_driver(&s5k3l1yx_i2c_driver);
+	}
+}
+
 static struct v4l2_subdev_core_ops s5k3l1yx_subdev_core_ops = {
 	.ioctl = msm_sensor_subdev_ioctl,
 	.s_power = msm_sensor_power,
@@ -693,5 +732,6 @@
 };
 
 module_init(msm_sensor_init_module);
+module_exit(msm_sensor_exit_module);
 MODULE_DESCRIPTION("Samsung 12MP Bayer sensor driver");
 MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/video/msm/server/msm_cam_server.c b/drivers/media/video/msm/server/msm_cam_server.c
index 36b9576..7d58091 100644
--- a/drivers/media/video/msm/server/msm_cam_server.c
+++ b/drivers/media/video/msm/server/msm_cam_server.c
@@ -108,6 +108,58 @@
 	v4l2_fh_exit(eventHandle);
 }
 
+int msm_cam_server_config_interface_map(u32 extendedmode, uint32_t mctl_handle)
+{
+	int i = 0;
+	int rc = 0;
+	int old_handle;
+	int interface;
+
+	switch (extendedmode) {
+	case MSM_V4L2_EXT_CAPTURE_MODE_RDI:
+		interface = RDI_0;
+		break;
+	case MSM_V4L2_EXT_CAPTURE_MODE_RDI1:
+		interface = RDI_1;
+		break;
+	case MSM_V4L2_EXT_CAPTURE_MODE_RDI2:
+		interface = RDI_2;
+		break;
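+	/* All other capture modes run over the PIX interface */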
+	default:
+		interface = PIX_0;
+		break;
+	}
+	for (i = 0; i < INTF_MAX; i++) {
+		if (g_server_dev.interface_map_table[i].interface ==
+							interface) {
+			old_handle =
+				g_server_dev.interface_map_table[i].mctl_handle;
+			if (old_handle == 0) {
+				g_server_dev.interface_map_table[i].mctl_handle
+					= mctl_handle;
+			} else if (old_handle != mctl_handle) {
+				pr_err("%s: interface_map[%d] was set: %d\n",
+					__func__, i, old_handle);
+				rc = -EINVAL;
+			}
+			break;
+		}
+	}
+
+	if (i == INTF_MAX)
+		rc = -EINVAL;
+	return rc;
+}
+
+void msm_cam_server_clear_interface_map(uint32_t mctl_handle)
+{
+	int i;
+	for (i = 0; i < INTF_MAX; i++)
+		if (g_server_dev.interface_map_table[i].mctl_handle ==
+								mctl_handle)
+			g_server_dev.interface_map_table[i].mctl_handle = 0;
+}
+
 uint32_t msm_cam_server_get_mctl_handle(void)
 {
 	uint32_t i;
@@ -146,13 +198,15 @@
 	return NULL;
 }
 
-static void msm_cam_server_send_error_evt(int evt_type)
+
+static void msm_cam_server_send_error_evt(
+		struct msm_cam_media_controller *pmctl, int evt_type)
 {
 	struct v4l2_event v4l2_ev;
 	v4l2_ev.id = 0;
 	v4l2_ev.type = evt_type;
 	ktime_get_ts(&v4l2_ev.timestamp);
-	v4l2_event_queue(g_server_dev.pcam_active->pvdev, &v4l2_ev);
+	v4l2_event_queue(pmctl->pcam_ptr->pvdev, &v4l2_ev);
 }
 
 static int msm_ctrl_cmd_done(void *arg)
@@ -370,7 +424,8 @@
 	ctrlcmd.vnode_id = pcam->vnode_id;
 	ctrlcmd.queue_idx = pcam->server_queue_idx;
 	ctrlcmd.stream_type = pcam->dev_inst[idx]->image_mode;
-	ctrlcmd.config_ident = g_server_dev.config_info.config_dev_id[0];
+	ctrlcmd.config_ident = g_server_dev.config_info.config_dev_id[
+						pcam->server_queue_idx];
 
 	/* send command to config thread in userspace, and get return value */
 	rc = msm_server_control(&g_server_dev, &ctrlcmd);
@@ -384,15 +439,17 @@
 {
 	int rc = 0;
 	struct msm_ctrl_cmd ctrlcmd;
+	int idx = pcam->server_queue_idx;
 	D("%s qid %d\n", __func__, pcam->server_queue_idx);
 	ctrlcmd.type	   = MSM_V4L2_OPEN;
 	ctrlcmd.timeout_ms = 10000;
-	ctrlcmd.length	 = strnlen(g_server_dev.config_info.config_dev_name[0],
-				MAX_DEV_NAME_LEN)+1;
-	ctrlcmd.value    = (char *)g_server_dev.config_info.config_dev_name[0];
+	ctrlcmd.length = strnlen(
+		g_server_dev.config_info.config_dev_name[idx],
+		MAX_DEV_NAME_LEN)+1;
+	ctrlcmd.value = (char *)g_server_dev.config_info.config_dev_name[idx];
 	ctrlcmd.vnode_id = pcam->vnode_id;
 	ctrlcmd.queue_idx = pcam->server_queue_idx;
-	ctrlcmd.config_ident = g_server_dev.config_info.config_dev_id[0];
+	ctrlcmd.config_ident = g_server_dev.config_info.config_dev_id[idx];
 
 	/* send command to config thread in usersspace, and get return value */
 	rc = msm_server_control(&g_server_dev, &ctrlcmd);
@@ -407,12 +464,14 @@
 	D("%s qid %d\n", __func__, pcam->server_queue_idx);
 	ctrlcmd.type	   = MSM_V4L2_CLOSE;
 	ctrlcmd.timeout_ms = 10000;
-	ctrlcmd.length	 = strnlen(g_server_dev.config_info.config_dev_name[0],
-				MAX_DEV_NAME_LEN)+1;
-	ctrlcmd.value    = (char *)g_server_dev.config_info.config_dev_name[0];
+	ctrlcmd.length	 = strnlen(g_server_dev.config_info.config_dev_name[
+				pcam->server_queue_idx], MAX_DEV_NAME_LEN)+1;
+	ctrlcmd.value    = (char *)g_server_dev.config_info.config_dev_name[
+				pcam->server_queue_idx];
 	ctrlcmd.vnode_id = pcam->vnode_id;
 	ctrlcmd.queue_idx = pcam->server_queue_idx;
-	ctrlcmd.config_ident = g_server_dev.config_info.config_dev_id[0];
+	ctrlcmd.config_ident = g_server_dev.config_info.config_dev_id[
+						pcam->server_queue_idx];
 
 	/* send command to config thread in usersspace, and get return value */
 	rc = msm_server_control(&g_server_dev, &ctrlcmd);
@@ -904,19 +963,20 @@
 		return rc;
 	}
 
-	/* The number of camera instance should be controlled by the
-		resource manager. Currently supporting one active instance
-		until multiple instances are supported */
-	if (atomic_read(&ps->number_pcam_active) > 0) {
-		pr_err("%s Cannot have more than one active camera %d\n",
+	/*
+	 * The number of camera instances should be controlled by the
+	 * resource manager. Currently supporting two active instances.
+	 */
+	if (atomic_read(&ps->number_pcam_active) > 1) {
+		pr_err("%s Cannot have more than two active camera %d\n",
 			__func__, atomic_read(&ps->number_pcam_active));
 		return -EINVAL;
 	}
 	/* book keeping this camera session*/
-	ps->pcam_active = pcam;
+	ps->pcam_active[pcam->server_queue_idx] = pcam;
 	atomic_inc(&ps->number_pcam_active);
 
-	D("config pcam = 0x%p\n", ps->pcam_active);
+	D("config pcam = 0x%p\n", pcam);
 
 	/* initialization the media controller module*/
 	msm_mctl_init(pcam);
@@ -928,6 +988,7 @@
 static int msm_cam_server_close_session(struct msm_cam_server_dev *ps,
 	struct msm_cam_v4l2_device *pcam)
 {
+	int i;
 	int rc = 0;
 	D("%s\n", __func__);
 
@@ -936,10 +997,14 @@
 		return rc;
 	}
 
-
 	atomic_dec(&ps->number_pcam_active);
-	ps->pcam_active = NULL;
+	ps->pcam_active[pcam->server_queue_idx] = NULL;
 
+	for (i = 0; i < INTF_MAX; i++) {
+		if (ps->interface_map_table[i].mctl_handle ==
+			pcam->mctl_handle)
+			ps->interface_map_table[i].mctl_handle = 0;
+	}
 	msm_mctl_free(pcam);
 	return rc;
 }
@@ -1228,22 +1293,23 @@
 	mutex_unlock(&g_server_dev.server_lock);
 
 	if (g_server_dev.use_count == 0) {
+		int i;
 		mutex_lock(&g_server_dev.server_lock);
-		if (g_server_dev.pcam_active) {
-			struct msm_cam_media_controller *pmctl = NULL;
-			int rc;
+		for (i = 0; i < MAX_NUM_ACTIVE_CAMERA; i++) {
+			if (g_server_dev.pcam_active[i]) {
+				struct msm_cam_media_controller *pmctl = NULL;
 
-			pmctl = msm_cam_server_get_mctl(
-				g_server_dev.pcam_active->mctl_handle);
-			if (pmctl && pmctl->mctl_release) {
-				rc = pmctl->mctl_release(pmctl);
-				if (rc < 0)
-					pr_err("mctl_release fails %d\n", rc);
-				/*so that it isn't closed again*/
-				pmctl->mctl_release = NULL;
+				pmctl = msm_cam_server_get_mctl(
+				g_server_dev.pcam_active[i]->mctl_handle);
+				if (pmctl && pmctl->mctl_release) {
+					pmctl->mctl_release(pmctl);
+					/*so that it isn't closed again*/
+					pmctl->mctl_release = NULL;
+				}
+				msm_cam_server_send_error_evt(pmctl,
+					V4L2_EVENT_PRIVATE_START +
+					MSM_CAM_APP_NOTIFY_ERROR_EVENT);
 			}
-			msm_cam_server_send_error_evt(V4L2_EVENT_PRIVATE_START
-				+ MSM_CAM_APP_NOTIFY_ERROR_EVENT);
 		}
 		sub.type = V4L2_EVENT_ALL;
 		msm_server_v4l2_unsubscribe_event(
@@ -1433,49 +1499,101 @@
 	.vidioc_default = msm_ioctl_server,
 };
 
+static int msm_camera_server_find_mctl(
+		unsigned int notification, void *arg)
+{
+	int i;
+	uint32_t interface;
+
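+	/* Map the notification to the VFE interface that generated it:
+	 * tertiary outputs and RDI update acks map to RDI_0/RDI_1, all
+	 * other notifications to PIX_0.
+	 */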
+	switch (notification) {
+	case NOTIFY_ISP_MSG_EVT:
+		if (((struct isp_msg_event *)arg)->msg_id ==
+			MSG_ID_RDI0_UPDATE_ACK)
+			interface = RDI_0;
+		else if (((struct isp_msg_event *)arg)->msg_id ==
+			MSG_ID_RDI1_UPDATE_ACK)
+			interface = RDI_1;
+		else
+			interface = PIX_0;
+		break;
+	case NOTIFY_VFE_MSG_OUT:
+		if (((struct isp_msg_output *)arg)->output_id ==
+					MSG_ID_OUTPUT_TERTIARY1)
+			interface = RDI_0;
+		else if (((struct isp_msg_output *)arg)->output_id ==
+						MSG_ID_OUTPUT_TERTIARY2)
+			interface = RDI_1;
+		else
+			interface = PIX_0;
+		break;
+	case NOTIFY_VFE_BUF_EVT: {
+		struct msm_vfe_resp *rp;
+		struct msm_frame_info *frame_info;
+		rp = (struct msm_vfe_resp *)arg;
+		frame_info = rp->evt_msg.data;
+		if (frame_info->path == VFE_MSG_OUTPUT_TERTIARY1)
+			interface = RDI_0;
+		else if (frame_info->path == VFE_MSG_OUTPUT_TERTIARY2)
+			interface = RDI_1;
+		else
+			interface = PIX_0;
+		}
+		break;
+	case NOTIFY_VFE_MSG_STATS:
+	case NOTIFY_VFE_MSG_COMP_STATS:
+	case NOTIFY_VFE_CAMIF_ERROR:
+	case NOTIFY_VFE_IRQ:
+	default:
+		interface = PIX_0;
+		break;
+	}
+
+	for (i = 0; i < INTF_MAX; i++) {
+		if (interface == g_server_dev.interface_map_table[i].interface)
+			break;
+	}
+	if (i == INTF_MAX) {
+		pr_err("%s: Cannot find valid interface map\n", __func__);
+		return -EINVAL;
+	} else
+		return g_server_dev.interface_map_table[i].mctl_handle;
+}
+
 static void msm_cam_server_subdev_notify(struct v4l2_subdev *sd,
 				unsigned int notification, void *arg)
 {
 	int rc = -EINVAL;
-	struct msm_sensor_ctrl_t *s_ctrl;
-	struct msm_camera_sensor_info *sinfo;
-	struct msm_camera_device_platform_data *camdev;
-	uint8_t csid_core = 0;
-	struct msm_cam_media_controller *p_mctl;
+	int mctl_handle = 0;
+	struct msm_cam_media_controller *p_mctl = NULL;
+	int is_gesture_evt =
+		(notification == NOTIFY_GESTURE_EVT)
+		|| (notification == NOTIFY_GESTURE_CAM_EVT);
 
-	if (notification == NOTIFY_PCLK_CHANGE ||
-		notification == NOTIFY_CSIPHY_CFG ||
-		notification == NOTIFY_CSID_CFG ||
-		notification == NOTIFY_CSIC_CFG) {
-		s_ctrl = get_sctrl(sd);
-		sinfo = (struct msm_camera_sensor_info *) s_ctrl->sensordata;
-		camdev = sinfo->pdata;
-		csid_core = camdev->csid_core;
-	}
-	if (notification != NOTIFY_GESTURE_CAM_EVT) {
-		p_mctl = v4l2_get_subdev_hostdata(sd);
-		if (p_mctl == NULL) {
-			pr_err("%s: cannot find mctl, %d\n",
-				__func__, notification);
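+	/* Gesture events are not tied to a VFE interface, so skip the lookup */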
+	if (!is_gesture_evt) {
+		mctl_handle = msm_camera_server_find_mctl(notification, arg);
+		if (mctl_handle < 0) {
+			pr_err("%s: Couldn't find mctl instance!\n", __func__);
 			return;
 		}
 	}
 	switch (notification) {
 	case NOTIFY_ISP_MSG_EVT:
 	case NOTIFY_VFE_MSG_OUT:
+	case NOTIFY_VFE_SOF_COUNT:
 	case NOTIFY_VFE_MSG_STATS:
 	case NOTIFY_VFE_MSG_COMP_STATS:
 	case NOTIFY_VFE_BUF_EVT:
-	case NOTIFY_VFE_BUF_FREE_EVT:
-		if (g_server_dev.isp_subdev[0] &&
-			g_server_dev.isp_subdev[0]->isp_notify
+		p_mctl = msm_cam_server_get_mctl(mctl_handle);
+		if (p_mctl->isp_sdev &&
+			p_mctl->isp_sdev->isp_notify
 			&& p_mctl->isp_sdev->sd)
-			rc = g_server_dev.isp_subdev[0]->isp_notify(
+			rc = p_mctl->isp_sdev->isp_notify(
 				p_mctl->isp_sdev->sd, notification, arg);
 		break;
 	case NOTIFY_VFE_IRQ:{
 		struct msm_vfe_cfg_cmd cfg_cmd;
 		struct msm_camvfe_params vfe_params;
+		p_mctl = msm_cam_server_get_mctl(mctl_handle);
 		cfg_cmd.cmd_type = CMD_VFE_PROCESS_IRQ;
 		vfe_params.vfe_cfg = &cfg_cmd;
 		vfe_params.data = arg;
@@ -1484,10 +1602,10 @@
 	}
 		break;
 	case NOTIFY_AXI_IRQ:
-		rc = v4l2_subdev_call(p_mctl->axi_sdev,
-			core, ioctl, VIDIOC_MSM_AXI_IRQ, arg);
+		rc = v4l2_subdev_call(sd, core, ioctl, VIDIOC_MSM_AXI_IRQ, arg);
 		break;
 	case NOTIFY_PCLK_CHANGE:
+		p_mctl = v4l2_get_subdev_hostdata(sd);
 		if (p_mctl->axi_sdev)
 			rc = v4l2_subdev_call(p_mctl->axi_sdev, video,
 			s_crystal_freq, *(uint32_t *)arg, 0);
@@ -1496,14 +1614,17 @@
 			s_crystal_freq, *(uint32_t *)arg, 0);
 		break;
 	case NOTIFY_CSIPHY_CFG:
+		p_mctl = v4l2_get_subdev_hostdata(sd);
 		rc = v4l2_subdev_call(p_mctl->csiphy_sdev,
 			core, ioctl, VIDIOC_MSM_CSIPHY_CFG, arg);
 		break;
 	case NOTIFY_CSID_CFG:
+		p_mctl = v4l2_get_subdev_hostdata(sd);
 		rc = v4l2_subdev_call(p_mctl->csid_sdev,
 			core, ioctl, VIDIOC_MSM_CSID_CFG, arg);
 		break;
 	case NOTIFY_CSIC_CFG:
+		p_mctl = v4l2_get_subdev_hostdata(sd);
 		rc = v4l2_subdev_call(p_mctl->csic_sdev,
 			core, ioctl, VIDIOC_MSM_CSIC_CFG, arg);
 		break;
@@ -1516,7 +1637,8 @@
 			core, ioctl, VIDIOC_MSM_GESTURE_CAM_EVT, arg);
 		break;
 	case NOTIFY_VFE_CAMIF_ERROR: {
-		msm_cam_server_send_error_evt(V4L2_EVENT_PRIVATE_START
+		p_mctl = msm_cam_server_get_mctl(mctl_handle);
+		msm_cam_server_send_error_evt(p_mctl, V4L2_EVENT_PRIVATE_START
 			+ MSM_CAM_APP_NOTIFY_ERROR_EVENT);
 		break;
 	}
@@ -2111,7 +2233,6 @@
 	spin_lock_init(&g_server_dev.intr_table_lock);
 	memset(&g_server_dev.irq_lkup_table, 0,
 			sizeof(struct irqmgr_intr_lkup_table));
-	g_server_dev.pcam_active = NULL;
 	g_server_dev.camera_info.num_cameras = 0;
 	atomic_set(&g_server_dev.number_pcam_active, 0);
 	g_server_dev.server_evt_id = 0;
@@ -2129,6 +2250,12 @@
 		queue->queue_active = 0;
 		msm_queue_init(&queue->ctrl_q, "control");
 		msm_queue_init(&queue->eventData_q, "eventdata");
+		g_server_dev.pcam_active[i] = NULL;
+	}
+
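+	/* One slot per interface bit; mctl_handle 0 means unmapped */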
+	for (i = 0; i < INTF_MAX; i++) {
+		g_server_dev.interface_map_table[i].interface = 0x01 << i;
+		g_server_dev.interface_map_table[i].mctl_handle = 0;
 	}
 	return rc;
 }
@@ -2161,13 +2288,17 @@
 	int *p_active)
 {
 	int rc = 0;
+	int i = 0;
 	struct msm_cam_media_controller *pmctl = NULL;
-	D("%s: %p", __func__, g_server_dev.pcam_active);
 	*p_active = 0;
-	if (g_server_dev.pcam_active) {
-		D("%s: Active camera present return", __func__);
-		return 0;
+
+	for (i = 0; i < MAX_NUM_ACTIVE_CAMERA; i++) {
+		if (NULL != g_server_dev.pcam_active[i]) {
+			pr_info("%s: Active camera present return", __func__);
+			return 0;
+		}
 	}
+
 	rc = msm_cam_server_open_session(&g_server_dev, pcam);
 	if (rc < 0) {
 		pr_err("%s: cam_server_open_session failed %d\n",
@@ -2206,11 +2337,8 @@
 		return -ENODEV;
 	}
 
-	if (pmctl->mctl_release) {
-		rc = pmctl->mctl_release(pmctl);
-		if (rc < 0)
-			pr_err("mctl_release fails %d\n", rc);
-	}
+	if (pmctl->mctl_release)
+		pmctl->mctl_release(pmctl);
 
 #ifdef CONFIG_MSM_MULTIMEDIA_USE_ION
 	kref_put(&pmctl->refcount, msm_release_ion_client);
@@ -2452,8 +2580,8 @@
 	config_cam->use_count++;
 
 	/* assume there is only one active camera possible*/
-	config_cam->p_mctl =
-		msm_cam_server_get_mctl(g_server_dev.pcam_active->mctl_handle);
+	config_cam->p_mctl = msm_cam_server_get_mctl(
+		g_server_dev.pcam_active[config_cam->dev_num]->mctl_handle);
 	if (!config_cam->p_mctl) {
 		pr_err("%s: cannot find mctl\n", __func__);
 		return -ENODEV;
@@ -2858,6 +2986,7 @@
 	msm_setup_v4l2_event_queue(
 		&config_cam->config_stat_event_queue.eventHandle,
 		config_cam->config_stat_event_queue.pvdev);
+	config_cam->dev_num = dev_num;
 
 	return rc;
 
@@ -2870,9 +2999,9 @@
 {
 	int rc = 0, i;
 	memset(&g_server_dev, 0, sizeof(struct msm_cam_server_dev));
-	/*for now just create a config 0 node
+	/*for now just create two config nodes
 	  put logic here later to know how many configs to create*/
-	g_server_dev.config_info.num_config_nodes = 1;
+	g_server_dev.config_info.num_config_nodes = 2;
 
 	rc = msm_isp_init_module(g_server_dev.config_info.num_config_nodes);
 	if (rc < 0) {
diff --git a/drivers/media/video/msm/server/msm_cam_server.h b/drivers/media/video/msm/server/msm_cam_server.h
index 8a02d32..229e9c9 100644
--- a/drivers/media/video/msm/server/msm_cam_server.h
+++ b/drivers/media/video/msm/server/msm_cam_server.h
@@ -64,4 +64,6 @@
 int msm_cam_server_request_irq(void *arg);
 int msm_cam_server_update_irqmap(
 	struct msm_cam_server_irqmap_entry *entry);
+int msm_cam_server_config_interface_map(u32 extendedmode,
+					uint32_t mctl_handle);
 #endif /* _MSM_CAM_SERVER_H */
diff --git a/drivers/media/video/msm/vfe/Makefile b/drivers/media/video/msm/vfe/Makefile
index 8068e4f..91f0e7f 100644
--- a/drivers/media/video/msm/vfe/Makefile
+++ b/drivers/media/video/msm/vfe/Makefile
@@ -16,4 +16,5 @@
 obj-$(CONFIG_ARCH_MSM_ARM11) += msm_vfe7x.o
 obj-$(CONFIG_ARCH_QSD8X50) += msm_vfe8x.o msm_vfe8x_proc.o
 obj-$(CONFIG_ARCH_MSM8960) += msm_vfe32.o
+obj-$(CONFIG_ARCH_MSM8974) += msm_vfe40.o msm_vfe40_axi.o
 obj-$(CONFIG_MSM_CAMERA_V4L2) += msm_vfe_stats_buf.o
diff --git a/drivers/media/video/msm/vfe/msm_vfe31_v4l2.c b/drivers/media/video/msm/vfe/msm_vfe31_v4l2.c
index b6e01a59..0bd7b94 100644
--- a/drivers/media/video/msm/vfe/msm_vfe31_v4l2.c
+++ b/drivers/media/video/msm/vfe/msm_vfe31_v4l2.c
@@ -677,6 +677,9 @@
 {
 	uint32_t *ch_info;
 	uint32_t *axi_cfg = ao + V31_AXI_RESERVED;
+	uint32_t bus_cmd = *axi_cfg;
+	int i;
+
 	/* Update the corresponding write masters for each output*/
 	ch_info = axi_cfg + V31_AXI_CFG_LEN;
 	vfe31_ctrl->outpath.out0.ch0 = 0x0000FFFF & *ch_info;
@@ -724,10 +727,23 @@
 		return -EINVAL;
 	}
 
+	axi_cfg++;
 	msm_camera_io_memcpy(vfe31_ctrl->vfebase +
 		vfe31_cmd[VFE_CMD_AXI_OUT_CFG].offset, axi_cfg,
-		vfe31_cmd[VFE_CMD_AXI_OUT_CFG].length - V31_AXI_CH_INF_LEN -
-			V31_AXI_RESERVED_LEN);
+		V31_AXI_BUS_CFG_LEN);
+	axi_cfg += V31_AXI_BUS_CFG_LEN/4;
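+	/* For each write master: word 0 goes to the WM base register and
+	 * words 3-5 to byte offset 12; the two words in between are skipped.
+	 */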
+	for (i = 0; i < ARRAY_SIZE(vfe31_AXI_WM_CFG); i++) {
+		msm_camera_io_w(*axi_cfg,
+		vfe31_ctrl->vfebase+vfe31_AXI_WM_CFG[i]);
+		axi_cfg += 3;
+		msm_camera_io_memcpy(
+			vfe31_ctrl->vfebase+vfe31_AXI_WM_CFG[i]+12,
+							axi_cfg, 12);
+		axi_cfg += 3;
+	}
+	msm_camera_io_w(bus_cmd, vfe31_ctrl->vfebase +
+					V31_AXI_BUS_CMD_OFF);
+
 	return 0;
 }
 
@@ -3814,6 +3830,10 @@
 			/* No need to decouple AXI/VFE for VFE3.1*/
 			break;
 
+		case CMD_AXI_RESET:
+			/* No need to decouple AXI/VFE for VFE3.1*/
+			break;
+
 		default:
 			pr_err("%s Unsupported AXI configuration %x ", __func__,
 				cmd->cmd_type);
diff --git a/drivers/media/video/msm/vfe/msm_vfe31_v4l2.h b/drivers/media/video/msm/vfe/msm_vfe31_v4l2.h
index 6396966..60db8e5 100644
--- a/drivers/media/video/msm/vfe/msm_vfe31_v4l2.h
+++ b/drivers/media/video/msm/vfe/msm_vfe31_v4l2.h
@@ -216,12 +216,13 @@
 
 #define V31_OPERATION_CFG_LEN     32
 
-#define V31_AXI_OUT_OFF           0x00000038
+#define V31_AXI_BUS_CMD_OFF       0x00000038
+#define V31_AXI_OUT_OFF           0x0000003C
 #define V31_AXI_OUT_LEN           240
-#define V31_AXI_CH_INF_LEN        48
 #define V31_AXI_CFG_LEN           47
 #define V31_AXI_RESERVED            1
 #define V31_AXI_RESERVED_LEN        4
+#define V31_AXI_BUS_CFG_LEN       16
 
 #define V31_FRAME_SKIP_OFF        0x00000504
 #define V31_FRAME_SKIP_LEN        32
diff --git a/drivers/media/video/msm/vfe/msm_vfe32.c b/drivers/media/video/msm/vfe/msm_vfe32.c
index 284db08..3e01437 100644
--- a/drivers/media/video/msm/vfe/msm_vfe32.c
+++ b/drivers/media/video/msm/vfe/msm_vfe32.c
@@ -412,37 +412,36 @@
 	}
 }
 
-static void vfe32_stop(struct vfe32_ctrl_type *vfe32_ctrl)
+static void axi_disable_irq(struct axi_ctrl_t *axi_ctrl)
 {
-	unsigned long flags;
-
-	atomic_set(&vfe32_ctrl->share_ctrl->vstate, 0);
-
-	/* for reset hw modules, and send msg when reset_irq comes.*/
-	spin_lock_irqsave(&vfe32_ctrl->share_ctrl->stop_flag_lock, flags);
-	vfe32_ctrl->share_ctrl->stop_ack_pending = TRUE;
-	spin_unlock_irqrestore(&vfe32_ctrl->share_ctrl->stop_flag_lock, flags);
 
 	/* disable all interrupts.  */
 	msm_camera_io_w(VFE_DISABLE_ALL_IRQS,
-		vfe32_ctrl->share_ctrl->vfebase + VFE_IRQ_MASK_0);
+		axi_ctrl->share_ctrl->vfebase + VFE_IRQ_MASK_0);
 	msm_camera_io_w(VFE_DISABLE_ALL_IRQS,
-		vfe32_ctrl->share_ctrl->vfebase + VFE_IRQ_MASK_1);
+		axi_ctrl->share_ctrl->vfebase + VFE_IRQ_MASK_1);
 
 	/* clear all pending interrupts*/
 	msm_camera_io_w(VFE_CLEAR_ALL_IRQS,
-		vfe32_ctrl->share_ctrl->vfebase + VFE_IRQ_CLEAR_0);
+		axi_ctrl->share_ctrl->vfebase + VFE_IRQ_CLEAR_0);
 	msm_camera_io_w(VFE_CLEAR_ALL_IRQS,
-		vfe32_ctrl->share_ctrl->vfebase + VFE_IRQ_CLEAR_1);
+		axi_ctrl->share_ctrl->vfebase + VFE_IRQ_CLEAR_1);
 	/* Ensure the write order while writing
 	to the command register using the barrier */
 	msm_camera_io_w_mb(1,
-		vfe32_ctrl->share_ctrl->vfebase + VFE_IRQ_CMD);
+		axi_ctrl->share_ctrl->vfebase + VFE_IRQ_CMD);
+}
 
+static void vfe32_stop(struct vfe32_ctrl_type *vfe32_ctrl)
+{
+
+	atomic_set(&vfe32_ctrl->share_ctrl->vstate, 0);
 	/* in either continuous or snapshot mode, stop command can be issued
-	 * at any time. stop camif immediately. */
+	 * at any time. stop camif at the frame boundary. */
-	msm_camera_io_w(CAMIF_COMMAND_STOP_IMMEDIATELY,
+	msm_camera_io_w(CAMIF_COMMAND_STOP_AT_FRAME_BOUNDARY,
 		vfe32_ctrl->share_ctrl->vfebase + VFE_CAMIF_COMMAND);
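+	/* Clear only the mode being stopped from the active operation mode */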
+	vfe32_ctrl->share_ctrl->operation_mode &=
+		~(vfe32_ctrl->share_ctrl->current_mode);
 }
 
 static void vfe32_subdev_notify(int id, int path, uint32_t inst_handle,
@@ -469,6 +468,8 @@
 	uint32_t *ch_info;
 	uint32_t *axi_cfg = ao+V32_AXI_BUS_FMT_OFF;
 	int vfe_mode = (mode & ~(OUTPUT_TERT1|OUTPUT_TERT2));
+	uint32_t bus_cmd = *axi_cfg;
+	int i;
 
 	/* Update the corresponding write masters for each output*/
 	ch_info = axi_cfg + V32_AXI_CFG_LEN;
@@ -543,44 +544,64 @@
 bus_cfg:
 	msm_camera_io_w(*ao, axi_ctrl->share_ctrl->vfebase +
 		VFE_BUS_IO_FORMAT_CFG);
+	axi_cfg++;
 	msm_camera_io_memcpy(axi_ctrl->share_ctrl->vfebase +
 		vfe32_cmd[VFE_CMD_AXI_OUT_CFG].offset, axi_cfg,
-		vfe32_cmd[VFE_CMD_AXI_OUT_CFG].length - V32_AXI_CH_INF_LEN
-		- V32_AXI_BUS_FMT_LEN);
+		V32_AXI_BUS_CFG_LEN);
+	axi_cfg += V32_AXI_BUS_CFG_LEN/4;
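+	/* For each write master: word 0 goes to the WM base register and
+	 * words 3-5 to byte offset 12; the two words in between are skipped.
+	 */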
+	for (i = 0; i < ARRAY_SIZE(vfe32_AXI_WM_CFG); i++) {
+		msm_camera_io_w(*axi_cfg,
+			axi_ctrl->share_ctrl->vfebase+vfe32_AXI_WM_CFG[i]);
+		axi_cfg += 3;
+		msm_camera_io_memcpy(
+			axi_ctrl->share_ctrl->vfebase+vfe32_AXI_WM_CFG[i]+12,
+								axi_cfg, 12);
+		axi_cfg += 3;
+	}
+	msm_camera_io_w(bus_cmd, axi_ctrl->share_ctrl->vfebase +
+					V32_AXI_BUS_CMD_OFF);
 	return 0;
 }
 
+static void axi_reset_internal_variables(
+	struct axi_ctrl_t *axi_ctrl)
+{
+	unsigned long flags;
+	/* state control variables */
+	axi_ctrl->share_ctrl->start_ack_pending = FALSE;
+	atomic_set(&irq_cnt, 0);
+
+	spin_lock_irqsave(&axi_ctrl->share_ctrl->stop_flag_lock, flags);
+	axi_ctrl->share_ctrl->stop_ack_pending  = FALSE;
+	spin_unlock_irqrestore(&axi_ctrl->share_ctrl->stop_flag_lock, flags);
+
+	init_completion(&axi_ctrl->share_ctrl->reset_complete);
+
+	spin_lock_irqsave(&axi_ctrl->share_ctrl->update_ack_lock, flags);
+	axi_ctrl->share_ctrl->update_ack_pending = FALSE;
+	spin_unlock_irqrestore(&axi_ctrl->share_ctrl->update_ack_lock, flags);
+
+	axi_ctrl->share_ctrl->recording_state = VFE_STATE_IDLE;
+	axi_ctrl->share_ctrl->liveshot_state = VFE_STATE_IDLE;
+
+	atomic_set(&axi_ctrl->share_ctrl->vstate, 0);
+	atomic_set(&axi_ctrl->share_ctrl->rdi0_update_ack_pending, 0);
+	atomic_set(&axi_ctrl->share_ctrl->rdi1_update_ack_pending, 0);
+	atomic_set(&axi_ctrl->share_ctrl->rdi2_update_ack_pending, 0);
+
+	/* 0 for continuous mode, 1 for snapshot mode */
+	axi_ctrl->share_ctrl->operation_mode = 0;
+	axi_ctrl->share_ctrl->current_mode = 0;
+	axi_ctrl->share_ctrl->outpath.output_mode = 0;
+	axi_ctrl->share_ctrl->vfe_capture_count = 0;
+
+	/* this is unsigned 32 bit integer. */
+	axi_ctrl->share_ctrl->vfeFrameId = 0;
+}
+
 static void vfe32_reset_internal_variables(
 	struct vfe32_ctrl_type *vfe32_ctrl)
 {
-	unsigned long flags;
-	vfe32_ctrl->vfeImaskCompositePacked = 0;
-	/* state control variables */
-	vfe32_ctrl->start_ack_pending = FALSE;
-	atomic_set(&irq_cnt, 0);
-
-	spin_lock_irqsave(&vfe32_ctrl->share_ctrl->stop_flag_lock, flags);
-	vfe32_ctrl->share_ctrl->stop_ack_pending  = FALSE;
-	spin_unlock_irqrestore(&vfe32_ctrl->share_ctrl->stop_flag_lock, flags);
-
-	init_completion(&vfe32_ctrl->reset_complete);
-
-	spin_lock_irqsave(&vfe32_ctrl->update_ack_lock, flags);
-	vfe32_ctrl->update_ack_pending = FALSE;
-	spin_unlock_irqrestore(&vfe32_ctrl->update_ack_lock, flags);
-
-	vfe32_ctrl->recording_state = VFE_STATE_IDLE;
-	vfe32_ctrl->share_ctrl->liveshot_state = VFE_STATE_IDLE;
-
-	atomic_set(&vfe32_ctrl->share_ctrl->vstate, 0);
-
-	/* 0 for continuous mode, 1 for snapshot mode */
-	vfe32_ctrl->share_ctrl->operation_mode = 0;
-	vfe32_ctrl->share_ctrl->outpath.output_mode = 0;
-	vfe32_ctrl->share_ctrl->vfe_capture_count = 0;
-
-	/* this is unsigned 32 bit integer. */
-	vfe32_ctrl->share_ctrl->vfeFrameId = 0;
 	/* Stats control variables. */
 	memset(&(vfe32_ctrl->afbfStatsControl), 0,
 		sizeof(struct vfe_stats_control));
@@ -664,31 +685,30 @@
 	vfe32_program_dmi_cfg(NO_MEM_SELECTED, vfe32_ctrl);
 }
 
-static int vfe32_reset(struct vfe32_ctrl_type *vfe32_ctrl)
+static int axi_reset(struct axi_ctrl_t *axi_ctrl)
 {
-	vfe32_reset_internal_variables(vfe32_ctrl);
+	axi_reset_internal_variables(axi_ctrl);
 	/* disable all interrupts.  vfeImaskLocal is also reset to 0
 	* to begin with. */
 	msm_camera_io_w(VFE_DISABLE_ALL_IRQS,
-		vfe32_ctrl->share_ctrl->vfebase + VFE_IRQ_MASK_0);
+		axi_ctrl->share_ctrl->vfebase + VFE_IRQ_MASK_0);
 
 	msm_camera_io_w(VFE_DISABLE_ALL_IRQS,
-		vfe32_ctrl->share_ctrl->vfebase + VFE_IRQ_MASK_1);
+		axi_ctrl->share_ctrl->vfebase + VFE_IRQ_MASK_1);
 
 	/* clear all pending interrupts*/
 	msm_camera_io_w(VFE_CLEAR_ALL_IRQS,
-		vfe32_ctrl->share_ctrl->vfebase + VFE_IRQ_CLEAR_0);
+		axi_ctrl->share_ctrl->vfebase + VFE_IRQ_CLEAR_0);
 	msm_camera_io_w(VFE_CLEAR_ALL_IRQS,
-		vfe32_ctrl->share_ctrl->vfebase + VFE_IRQ_CLEAR_1);
+		axi_ctrl->share_ctrl->vfebase + VFE_IRQ_CLEAR_1);
 
 	/* Ensure the write order while writing
 	to the command register using the barrier */
-	msm_camera_io_w_mb(1, vfe32_ctrl->share_ctrl->vfebase +
-		VFE_IRQ_CMD);
+	msm_camera_io_w_mb(1, axi_ctrl->share_ctrl->vfebase + VFE_IRQ_CMD);
 
 	/* enable reset_ack interrupt.  */
 	msm_camera_io_w(VFE_IMASK_WHILE_STOPPING_1,
-	vfe32_ctrl->share_ctrl->vfebase + VFE_IRQ_MASK_1);
+		axi_ctrl->share_ctrl->vfebase + VFE_IRQ_MASK_1);
 
 	/* Write to VFE_GLOBAL_RESET_CMD to reset the vfe hardware. Once reset
 	 * is done, hardware interrupt will be generated.  VFE ist processes
@@ -698,13 +718,10 @@
 	/* Ensure the write order while writing
 	to the command register using the barrier */
 	msm_camera_io_w_mb(VFE_RESET_UPON_RESET_CMD,
-		vfe32_ctrl->share_ctrl->vfebase + VFE_GLOBAL_RESET);
+		axi_ctrl->share_ctrl->vfebase + VFE_GLOBAL_RESET);
 
-	if (vfe32_ctrl->is_reset_blocking)
-		return wait_for_completion_interruptible(
-				&vfe32_ctrl->reset_complete);
-	else
-		return 0;
+	return wait_for_completion_interruptible(
+			&axi_ctrl->share_ctrl->reset_complete);
 }
 
 static int vfe32_operation_config(uint32_t *cmd,
@@ -1048,11 +1065,11 @@
 {
 	uint32_t irq_mask = 0x00E00021, irq_mask1, reg_update;
 	uint16_t vfe_operation_mode =
-		vfe32_ctrl->share_ctrl->operation_mode & ~(VFE_OUTPUTS_RDI0|
+		vfe32_ctrl->share_ctrl->current_mode & ~(VFE_OUTPUTS_RDI0|
 			VFE_OUTPUTS_RDI1);
-	vfe32_ctrl->start_ack_pending = TRUE;
+	vfe32_ctrl->share_ctrl->start_ack_pending = TRUE;
 	CDBG("VFE opertaion mode = 0x%x, output mode = 0x%x\n",
-		vfe32_ctrl->share_ctrl->operation_mode,
+		vfe32_ctrl->share_ctrl->current_mode,
 		vfe32_ctrl->share_ctrl->outpath.output_mode);
 	if (vfe32_ctrl->share_ctrl->stats_comp)
 		irq_mask |= VFE_IRQ_STATUS0_STATS_COMPOSIT_MASK;
@@ -1073,17 +1090,29 @@
 		msm_camera_io_r_mb(vfe32_ctrl->share_ctrl->vfebase +
 			VFE_REG_UPDATE_CMD);
 
-	if (vfe32_ctrl->share_ctrl->operation_mode & VFE_OUTPUTS_RDI0) {
+	if (vfe32_ctrl->share_ctrl->current_mode & VFE_OUTPUTS_RDI0) {
 		irq_mask1 |= VFE_IRQ_STATUS1_RDI0_REG_UPDATE_MASK;
 		msm_camera_io_w(irq_mask1, vfe32_ctrl->share_ctrl->vfebase +
 			VFE_IRQ_MASK_1);
-		msm_camera_io_w_mb(reg_update|0x2, vfe32_ctrl->share_ctrl->
-			vfebase + VFE_REG_UPDATE_CMD);
+		if (!atomic_cmpxchg(
+			&vfe32_ctrl->share_ctrl->rdi0_update_ack_pending,
+				0, 1)) {
+			msm_camera_io_w_mb(reg_update|0x2,
+				vfe32_ctrl->share_ctrl->vfebase +
+				VFE_REG_UPDATE_CMD);
+		}
 	}
-	if (vfe32_ctrl->share_ctrl->operation_mode & VFE_OUTPUTS_RDI1) {
+	if (vfe32_ctrl->share_ctrl->current_mode & VFE_OUTPUTS_RDI1) {
 		irq_mask1 |= VFE_IRQ_STATUS1_RDI1_REG_UPDATE_MASK;
 		msm_camera_io_w(irq_mask1, vfe32_ctrl->share_ctrl->vfebase +
 			VFE_IRQ_MASK_1);
+		if (!atomic_cmpxchg(
+			&vfe32_ctrl->share_ctrl->rdi1_update_ack_pending,
+				0, 1)) {
+			msm_camera_io_w_mb(reg_update|0x4,
+				vfe32_ctrl->share_ctrl->vfebase +
+				VFE_REG_UPDATE_CMD);
+		}
-		msm_camera_io_w_mb(reg_update|0x4, vfe32_ctrl->share_ctrl->
-			vfebase + VFE_REG_UPDATE_CMD);
 	}
@@ -1093,9 +1122,8 @@
 		msm_camera_io_w_mb(1, vfe32_ctrl->share_ctrl->vfebase +
 			VFE_CAMIF_COMMAND);
 	}
-	msm_camera_io_dump(vfe32_ctrl->share_ctrl->vfebase,
-		vfe32_ctrl->share_ctrl->register_total * 4);
-
+	vfe32_ctrl->share_ctrl->operation_mode |=
+		vfe32_ctrl->share_ctrl->current_mode;
 	/* Ensure the write order while writing
 	to the command register using the barrier */
 	atomic_set(&vfe32_ctrl->share_ctrl->vstate, 1);
@@ -1105,9 +1133,7 @@
 	struct msm_cam_media_controller *pmctl,
 	struct vfe32_ctrl_type *vfe32_ctrl)
 {
-	msm_camio_bus_scale_cfg(
-		pmctl->sdata->pdata->cam_bus_scale_table, S_VIDEO);
-	vfe32_ctrl->recording_state = VFE_STATE_START_REQUESTED;
+	vfe32_ctrl->share_ctrl->recording_state = VFE_STATE_START_REQUESTED;
 	msm_camera_io_w_mb(1,
 		vfe32_ctrl->share_ctrl->vfebase + VFE_REG_UPDATE_CMD);
 	return 0;
@@ -1117,11 +1143,9 @@
 	struct msm_cam_media_controller *pmctl,
 	struct vfe32_ctrl_type *vfe32_ctrl)
 {
-	vfe32_ctrl->recording_state = VFE_STATE_STOP_REQUESTED;
+	vfe32_ctrl->share_ctrl->recording_state = VFE_STATE_STOP_REQUESTED;
 	msm_camera_io_w_mb(1,
 		vfe32_ctrl->share_ctrl->vfebase + VFE_REG_UPDATE_CMD);
-	msm_camio_bus_scale_cfg(
-		pmctl->sdata->pdata->cam_bus_scale_table, S_PREVIEW);
 	return 0;
 }
 
@@ -1134,8 +1158,6 @@
 	vfe32_ctrl->share_ctrl->vfe_capture_count =
 		vfe32_ctrl->share_ctrl->outpath.out0.capture_cnt;
 
-	msm_camio_bus_scale_cfg(
-		pmctl->sdata->pdata->cam_bus_scale_table, S_LIVESHOT);
 	vfe32_ctrl->share_ctrl->liveshot_state = VFE_STATE_START_REQUESTED;
 	msm_camera_io_w_mb(1, vfe32_ctrl->
 		share_ctrl->vfebase + VFE_REG_UPDATE_CMD);
@@ -1148,101 +1170,13 @@
 	vfe32_ctrl->share_ctrl->liveshot_state = VFE_STATE_STOP_REQUESTED;
 	msm_camera_io_w_mb(1,
 		vfe32_ctrl->share_ctrl->vfebase + VFE_REG_UPDATE_CMD);
-	msm_camio_bus_scale_cfg(
-		pmctl->sdata->pdata->cam_bus_scale_table, S_VIDEO);
 }
 
 static int vfe32_zsl(
 	struct msm_cam_media_controller *pmctl,
 	struct vfe32_ctrl_type *vfe32_ctrl)
 {
-	uint32_t irq_comp_mask = 0;
-	/* capture command is valid for both idle and active state. */
-	irq_comp_mask	=
-		msm_camera_io_r(vfe32_ctrl->
-		share_ctrl->vfebase + VFE_IRQ_COMP_MASK);
-
-	CDBG("%s:op mode %d O/P Mode %d\n", __func__,
-		vfe32_ctrl->share_ctrl->operation_mode,
-		vfe32_ctrl->share_ctrl->outpath.output_mode);
-
-	if (vfe32_ctrl->share_ctrl->outpath.output_mode &
-		VFE32_OUTPUT_MODE_PRIMARY) {
-		irq_comp_mask |= (
-			(0x1 << (vfe32_ctrl->share_ctrl->outpath.out0.ch0)) |
-			(0x1 << (vfe32_ctrl->share_ctrl->outpath.out0.ch1)));
-	} else if (vfe32_ctrl->share_ctrl->outpath.output_mode &
-			VFE32_OUTPUT_MODE_PRIMARY_ALL_CHNLS) {
-		irq_comp_mask |= (
-			(0x1 << (vfe32_ctrl->share_ctrl->outpath.out0.ch0)) |
-			(0x1 << (vfe32_ctrl->share_ctrl->outpath.out0.ch1)) |
-			(0x1 << (vfe32_ctrl->share_ctrl->outpath.out0.ch2)));
-	}
-
-	if (vfe32_ctrl->share_ctrl->outpath.output_mode &
-		VFE32_OUTPUT_MODE_SECONDARY) {
-		irq_comp_mask |= ((0x1 << (vfe32_ctrl->
-				share_ctrl->outpath.out1.ch0 + 8)) |
-			(0x1 << (vfe32_ctrl->
-				share_ctrl->outpath.out1.ch1 + 8)));
-	} else if (vfe32_ctrl->share_ctrl->outpath.output_mode &
-			   VFE32_OUTPUT_MODE_SECONDARY_ALL_CHNLS) {
-		irq_comp_mask |= (
-			(0x1 << (vfe32_ctrl->
-				share_ctrl->outpath.out1.ch0 + 8)) |
-			(0x1 << (vfe32_ctrl->
-				share_ctrl->outpath.out1.ch1 + 8)) |
-			(0x1 << (vfe32_ctrl->
-				share_ctrl->outpath.out1.ch2 + 8)));
-	}
-
-	if (vfe32_ctrl->share_ctrl->outpath.output_mode &
-			VFE32_OUTPUT_MODE_PRIMARY) {
-		msm_camera_io_w(1, vfe32_ctrl->share_ctrl->vfebase +
-			vfe32_AXI_WM_CFG[vfe32_ctrl->
-			share_ctrl->outpath.out0.ch0]);
-		msm_camera_io_w(1, vfe32_ctrl->share_ctrl->vfebase +
-			vfe32_AXI_WM_CFG[vfe32_ctrl->
-			share_ctrl->outpath.out0.ch1]);
-	} else if (vfe32_ctrl->share_ctrl->outpath.output_mode &
-				VFE32_OUTPUT_MODE_PRIMARY_ALL_CHNLS) {
-		msm_camera_io_w(1, vfe32_ctrl->share_ctrl->vfebase +
-			vfe32_AXI_WM_CFG[vfe32_ctrl->
-			share_ctrl->outpath.out0.ch0]);
-		msm_camera_io_w(1, vfe32_ctrl->share_ctrl->vfebase +
-			vfe32_AXI_WM_CFG[vfe32_ctrl->
-			share_ctrl->outpath.out0.ch1]);
-		msm_camera_io_w(1, vfe32_ctrl->share_ctrl->vfebase +
-			vfe32_AXI_WM_CFG[vfe32_ctrl->
-			share_ctrl->outpath.out0.ch2]);
-	}
-
-	if (vfe32_ctrl->share_ctrl->outpath.output_mode &
-			VFE32_OUTPUT_MODE_SECONDARY) {
-		msm_camera_io_w(1, vfe32_ctrl->share_ctrl->vfebase +
-			vfe32_AXI_WM_CFG[vfe32_ctrl->
-			share_ctrl->outpath.out1.ch0]);
-		msm_camera_io_w(1, vfe32_ctrl->share_ctrl->vfebase +
-			vfe32_AXI_WM_CFG[vfe32_ctrl->
-			share_ctrl->outpath.out1.ch1]);
-	} else if (vfe32_ctrl->share_ctrl->outpath.output_mode &
-				VFE32_OUTPUT_MODE_SECONDARY_ALL_CHNLS) {
-		msm_camera_io_w(1, vfe32_ctrl->share_ctrl->vfebase +
-			vfe32_AXI_WM_CFG[vfe32_ctrl->
-			share_ctrl->outpath.out1.ch0]);
-		msm_camera_io_w(1, vfe32_ctrl->share_ctrl->vfebase +
-			vfe32_AXI_WM_CFG[vfe32_ctrl->
-			share_ctrl->outpath.out1.ch1]);
-		msm_camera_io_w(1, vfe32_ctrl->share_ctrl->vfebase +
-			vfe32_AXI_WM_CFG[vfe32_ctrl->
-			share_ctrl->outpath.out1.ch2]);
-	}
-
-	msm_camera_io_w(irq_comp_mask,
-		vfe32_ctrl->share_ctrl->vfebase + VFE_IRQ_COMP_MASK);
 	vfe32_start_common(vfe32_ctrl);
-	msm_camio_bus_scale_cfg(
-		pmctl->sdata->pdata->cam_bus_scale_table, S_ZSL);
 
 	msm_camera_io_w(1, vfe32_ctrl->share_ctrl->vfebase + 0x18C);
 	msm_camera_io_w(1, vfe32_ctrl->share_ctrl->vfebase + 0x188);
@@ -1253,28 +1187,8 @@
 	struct vfe32_ctrl_type *vfe32_ctrl,
 	uint32_t num_frames_capture)
 {
-	uint32_t irq_comp_mask = 0;
-
 	vfe32_ctrl->share_ctrl->outpath.out0.capture_cnt = num_frames_capture;
 	vfe32_ctrl->share_ctrl->vfe_capture_count = num_frames_capture;
-
-	irq_comp_mask	=
-		msm_camera_io_r(
-			vfe32_ctrl->share_ctrl->vfebase + VFE_IRQ_COMP_MASK);
-
-	if (vfe32_ctrl->share_ctrl->outpath.output_mode &
-		VFE32_OUTPUT_MODE_PRIMARY) {
-		irq_comp_mask |=
-			(0x1 << (vfe32_ctrl->share_ctrl->outpath.out0.ch0));
-		msm_camera_io_w(1, vfe32_ctrl->share_ctrl->vfebase +
-			vfe32_AXI_WM_CFG[vfe32_ctrl->
-			share_ctrl->outpath.out0.ch0]);
-	}
-
-	msm_camera_io_w(irq_comp_mask,
-		vfe32_ctrl->share_ctrl->vfebase + VFE_IRQ_COMP_MASK);
-	msm_camio_bus_scale_cfg(
-		pmctl->sdata->pdata->cam_bus_scale_table, S_CAPTURE);
 	vfe32_start_common(vfe32_ctrl);
 	return 0;
 }
@@ -1284,75 +1198,24 @@
 	uint32_t num_frames_capture,
 	struct vfe32_ctrl_type *vfe32_ctrl)
 {
-	uint32_t irq_comp_mask = 0;
-
 	/* capture command is valid for both idle and active state. */
 	vfe32_ctrl->share_ctrl->outpath.out1.capture_cnt = num_frames_capture;
-	if (vfe32_ctrl->share_ctrl->operation_mode ==
+	if (vfe32_ctrl->share_ctrl->current_mode ==
 			VFE_OUTPUTS_MAIN_AND_THUMB ||
-		vfe32_ctrl->share_ctrl->operation_mode ==
+		vfe32_ctrl->share_ctrl->current_mode ==
 			VFE_OUTPUTS_THUMB_AND_MAIN ||
-		vfe32_ctrl->share_ctrl->operation_mode ==
+		vfe32_ctrl->share_ctrl->current_mode ==
 			VFE_OUTPUTS_JPEG_AND_THUMB ||
-		vfe32_ctrl->share_ctrl->operation_mode ==
+		vfe32_ctrl->share_ctrl->current_mode ==
 			VFE_OUTPUTS_THUMB_AND_JPEG) {
 		vfe32_ctrl->share_ctrl->outpath.out0.capture_cnt =
 			num_frames_capture;
 	}
 
 	vfe32_ctrl->share_ctrl->vfe_capture_count = num_frames_capture;
-	irq_comp_mask = msm_camera_io_r(
-			vfe32_ctrl->share_ctrl->vfebase + VFE_IRQ_COMP_MASK);
-
-	if (vfe32_ctrl->share_ctrl->operation_mode ==
-			VFE_OUTPUTS_MAIN_AND_THUMB ||
-		vfe32_ctrl->share_ctrl->operation_mode ==
-			VFE_OUTPUTS_JPEG_AND_THUMB ||
-		vfe32_ctrl->share_ctrl->operation_mode ==
-			VFE_OUTPUTS_THUMB_AND_MAIN) {
-		if (vfe32_ctrl->share_ctrl->outpath.output_mode &
-			VFE32_OUTPUT_MODE_PRIMARY) {
-			irq_comp_mask |= (0x1 << vfe32_ctrl->
-				share_ctrl->outpath.out0.ch0 |
-				0x1 << vfe32_ctrl->
-				share_ctrl->outpath.out0.ch1);
-		}
-		if (vfe32_ctrl->share_ctrl->outpath.output_mode &
-			VFE32_OUTPUT_MODE_SECONDARY) {
-			irq_comp_mask |=
-				(0x1 << (vfe32_ctrl->
-					share_ctrl->outpath.out1.ch0 + 8) |
-				0x1 << (vfe32_ctrl->
-					share_ctrl->outpath.out1.ch1 + 8));
-		}
-		if (vfe32_ctrl->share_ctrl->outpath.output_mode &
-			VFE32_OUTPUT_MODE_PRIMARY) {
-			msm_camera_io_w(1, vfe32_ctrl->share_ctrl->vfebase +
-				vfe32_AXI_WM_CFG[vfe32_ctrl->
-				share_ctrl->outpath.out0.ch0]);
-			msm_camera_io_w(1, vfe32_ctrl->share_ctrl->vfebase +
-				vfe32_AXI_WM_CFG[vfe32_ctrl->
-				share_ctrl->outpath.out0.ch1]);
-		}
-		if (vfe32_ctrl->share_ctrl->outpath.output_mode &
-			VFE32_OUTPUT_MODE_SECONDARY) {
-			msm_camera_io_w(1, vfe32_ctrl->share_ctrl->vfebase +
-				vfe32_AXI_WM_CFG[vfe32_ctrl->
-				share_ctrl->outpath.out1.ch0]);
-			msm_camera_io_w(1, vfe32_ctrl->share_ctrl->vfebase +
-				vfe32_AXI_WM_CFG[vfe32_ctrl->
-				share_ctrl->outpath.out1.ch1]);
-		}
-	}
 
 	vfe32_ctrl->share_ctrl->vfe_capture_count = num_frames_capture;
 
-	msm_camera_io_w(irq_comp_mask,
-		vfe32_ctrl->share_ctrl->vfebase + VFE_IRQ_COMP_MASK);
-	msm_camera_io_r(vfe32_ctrl->share_ctrl->vfebase + VFE_IRQ_COMP_MASK);
-	msm_camio_bus_scale_cfg(
-		pmctl->sdata->pdata->cam_bus_scale_table, S_CAPTURE);
-
 	vfe32_start_common(vfe32_ctrl);
 	/* for debug */
 	msm_camera_io_w(1, vfe32_ctrl->share_ctrl->vfebase + 0x18C);
@@ -1364,60 +1227,6 @@
 	struct msm_cam_media_controller *pmctl,
 	struct vfe32_ctrl_type *vfe32_ctrl)
 {
-	uint32_t irq_comp_mask = 0, irq_mask = 0;
-
-	irq_comp_mask	=
-		msm_camera_io_r(vfe32_ctrl->share_ctrl->vfebase +
-			VFE_IRQ_COMP_MASK);
-
-	if (vfe32_ctrl->share_ctrl->outpath.output_mode &
-			VFE32_OUTPUT_MODE_PRIMARY) {
-		irq_comp_mask |= (
-			0x1 << vfe32_ctrl->share_ctrl->outpath.out0.ch0 |
-			0x1 << vfe32_ctrl->share_ctrl->outpath.out0.ch1);
-	} else if (vfe32_ctrl->share_ctrl->outpath.output_mode &
-			   VFE32_OUTPUT_MODE_PRIMARY_ALL_CHNLS) {
-		irq_comp_mask |= (
-			0x1 << vfe32_ctrl->share_ctrl->outpath.out0.ch0 |
-			0x1 << vfe32_ctrl->share_ctrl->outpath.out0.ch1 |
-			0x1 << vfe32_ctrl->share_ctrl->outpath.out0.ch2);
-	}
-	if (vfe32_ctrl->share_ctrl->outpath.output_mode &
-			VFE32_OUTPUT_MODE_SECONDARY) {
-		irq_comp_mask |= (
-			0x1 << (vfe32_ctrl->share_ctrl->outpath.out1.ch0 + 8) |
-			0x1 << (vfe32_ctrl->share_ctrl->outpath.out1.ch1 + 8));
-	} else if (vfe32_ctrl->share_ctrl->outpath.output_mode &
-			VFE32_OUTPUT_MODE_SECONDARY_ALL_CHNLS) {
-		irq_comp_mask |= (
-			0x1 << (vfe32_ctrl->share_ctrl->outpath.out1.ch0 + 8) |
-			0x1 << (vfe32_ctrl->share_ctrl->outpath.out1.ch1 + 8) |
-			0x1 << (vfe32_ctrl->share_ctrl->outpath.out1.ch2 + 8));
-	}
-	if (vfe32_ctrl->share_ctrl->outpath.output_mode &
-		VFE32_OUTPUT_MODE_TERTIARY1) {
-		irq_mask = msm_camera_io_r(vfe32_ctrl->share_ctrl->vfebase +
-			VFE_IRQ_MASK_0);
-		irq_mask |= (0x1 << (vfe32_ctrl->share_ctrl->outpath.out2.ch0 +
-			VFE_WM_OFFSET));
-		msm_camera_io_w(irq_mask, vfe32_ctrl->share_ctrl->vfebase +
-			VFE_IRQ_MASK_0);
-	}
-	if (vfe32_ctrl->share_ctrl->outpath.output_mode &
-		VFE32_OUTPUT_MODE_TERTIARY2) {
-		irq_mask = msm_camera_io_r(vfe32_ctrl->share_ctrl->vfebase +
-			VFE_IRQ_MASK_0);
-		irq_mask |= (0x1 << (vfe32_ctrl->share_ctrl->outpath.out3.ch0 +
-			VFE_WM_OFFSET));
-		msm_camera_io_w(irq_mask, vfe32_ctrl->share_ctrl->vfebase +
-			VFE_IRQ_MASK_0);
-	}
-
-	msm_camera_io_w(irq_comp_mask,
-		vfe32_ctrl->share_ctrl->vfebase + VFE_IRQ_COMP_MASK);
-
-	msm_camio_bus_scale_cfg(
-		pmctl->sdata->pdata->cam_bus_scale_table, S_PREVIEW);
 	vfe32_start_common(vfe32_ctrl);
 	return 0;
 }
@@ -1469,9 +1278,9 @@
 		vfe32_ctrl->update_gamma = false;
 	}
 
-	spin_lock_irqsave(&vfe32_ctrl->update_ack_lock, flags);
-	vfe32_ctrl->update_ack_pending = TRUE;
-	spin_unlock_irqrestore(&vfe32_ctrl->update_ack_lock, flags);
+	spin_lock_irqsave(&vfe32_ctrl->share_ctrl->update_ack_lock, flags);
+	vfe32_ctrl->share_ctrl->update_ack_pending = TRUE;
+	spin_unlock_irqrestore(&vfe32_ctrl->share_ctrl->update_ack_lock, flags);
 	/* Ensure the write order while writing
 	to the command register using the barrier */
 	msm_camera_io_w_mb(1,
@@ -1652,53 +1461,53 @@
 		b = &outch->free_buf;
 	return b;
 }
-static int vfe32_configure_pingpong_buffers(
-	int id, int path, struct vfe32_ctrl_type *vfe32_ctrl)
+static int configure_pingpong_buffers(
+	int id, int path, struct axi_ctrl_t *axi_ctrl)
 {
 	struct vfe32_output_ch *outch = NULL;
 	int rc = 0;
 	uint32_t inst_handle = 0;
 	if (path == VFE_MSG_OUTPUT_PRIMARY)
-		inst_handle = vfe32_ctrl->share_ctrl->outpath.out0.inst_handle;
+		inst_handle = axi_ctrl->share_ctrl->outpath.out0.inst_handle;
 	else if (path == VFE_MSG_OUTPUT_SECONDARY)
-		inst_handle = vfe32_ctrl->share_ctrl->outpath.out1.inst_handle;
+		inst_handle = axi_ctrl->share_ctrl->outpath.out1.inst_handle;
 	else if (path == VFE_MSG_OUTPUT_TERTIARY1)
-		inst_handle = vfe32_ctrl->share_ctrl->outpath.out2.inst_handle;
+		inst_handle = axi_ctrl->share_ctrl->outpath.out2.inst_handle;
 	else if (path == VFE_MSG_OUTPUT_TERTIARY2)
-		inst_handle = vfe32_ctrl->share_ctrl->outpath.out3.inst_handle;
+		inst_handle = axi_ctrl->share_ctrl->outpath.out3.inst_handle;
 
 	vfe32_subdev_notify(id, path, inst_handle,
-		&vfe32_ctrl->subdev, vfe32_ctrl->share_ctrl);
-	outch = vfe32_get_ch(path, vfe32_ctrl->share_ctrl);
+		&axi_ctrl->subdev, axi_ctrl->share_ctrl);
+	outch = vfe32_get_ch(path, axi_ctrl->share_ctrl);
 	if (outch->ping.ch_paddr[0] && outch->pong.ch_paddr[0]) {
 		/* Configure Preview Ping Pong */
 		pr_info("%s Configure ping/pong address for %d",
 						__func__, path);
 		vfe32_put_ch_ping_addr(
-			vfe32_ctrl->share_ctrl->vfebase, outch->ch0,
+			axi_ctrl->share_ctrl->vfebase, outch->ch0,
 			outch->ping.ch_paddr[0]);
 		vfe32_put_ch_pong_addr(
-			vfe32_ctrl->share_ctrl->vfebase, outch->ch0,
+			axi_ctrl->share_ctrl->vfebase, outch->ch0,
 			outch->pong.ch_paddr[0]);
 
-		if ((vfe32_ctrl->share_ctrl->operation_mode !=
+		if ((axi_ctrl->share_ctrl->current_mode !=
 			VFE_OUTPUTS_RAW) && (path != VFE_MSG_OUTPUT_TERTIARY1)
 			&& (path != VFE_MSG_OUTPUT_TERTIARY2)) {
 			vfe32_put_ch_ping_addr(
-				vfe32_ctrl->share_ctrl->vfebase, outch->ch1,
+				axi_ctrl->share_ctrl->vfebase, outch->ch1,
 				outch->ping.ch_paddr[1]);
 			vfe32_put_ch_pong_addr(
-				vfe32_ctrl->share_ctrl->vfebase, outch->ch1,
+				axi_ctrl->share_ctrl->vfebase, outch->ch1,
 				outch->pong.ch_paddr[1]);
 		}
 
 		if (outch->ping.num_planes > 2)
 			vfe32_put_ch_ping_addr(
-				vfe32_ctrl->share_ctrl->vfebase, outch->ch2,
+				axi_ctrl->share_ctrl->vfebase, outch->ch2,
 				outch->ping.ch_paddr[2]);
 		if (outch->pong.num_planes > 2)
 			vfe32_put_ch_pong_addr(
-				vfe32_ctrl->share_ctrl->vfebase, outch->ch2,
+				axi_ctrl->share_ctrl->vfebase, outch->ch2,
 				outch->pong.ch_paddr[2]);
 
 		/* avoid stale info */
@@ -1753,7 +1562,7 @@
 	uint32_t *cmdp_local = NULL;
 	uint32_t snapshot_cnt = 0;
 	uint32_t temp1 = 0, temp2 = 0;
-	uint16_t vfe_mode = 0;
+	struct msm_camera_vfe_params_t vfe_params;
 
 	CDBG("vfe32_proc_general: cmdID = %s, length = %d\n",
 		vfe32_general_cmd[cmd->id], cmd->length);
@@ -1761,48 +1570,21 @@
 	case VFE_CMD_RESET:
 		pr_info("vfe32_proc_general: cmdID = %s\n",
 			vfe32_general_cmd[cmd->id]);
-		vfe32_ctrl->is_reset_blocking = false;
-		vfe32_reset(vfe32_ctrl);
+		vfe32_reset_internal_variables(vfe32_ctrl);
 		break;
 	case VFE_CMD_START:
 		pr_info("vfe32_proc_general: cmdID = %s\n",
 			vfe32_general_cmd[cmd->id]);
-		vfe_mode = vfe32_ctrl->share_ctrl->operation_mode
-			& ~(VFE_OUTPUTS_RDI0|VFE_OUTPUTS_RDI1);
-		if (vfe_mode) {
-			if ((vfe32_ctrl->share_ctrl->operation_mode &
-				VFE_OUTPUTS_PREVIEW_AND_VIDEO) ||
-				(vfe32_ctrl->share_ctrl->operation_mode &
-				VFE_OUTPUTS_PREVIEW))
-				/* Configure primary channel */
-				rc = vfe32_configure_pingpong_buffers(
-					VFE_MSG_START,
-					VFE_MSG_OUTPUT_PRIMARY,
-					vfe32_ctrl);
-			else
-			/* Configure secondary channel */
-				rc = vfe32_configure_pingpong_buffers(
-					VFE_MSG_START,
-					VFE_MSG_OUTPUT_SECONDARY,
-					vfe32_ctrl);
+		if (copy_from_user(&vfe_params,
+				(void __user *)(cmd->value),
+				sizeof(struct msm_camera_vfe_params_t))) {
+				rc = -EFAULT;
+				goto proc_general_done;
 		}
-		if (vfe32_ctrl->share_ctrl->operation_mode &
-				VFE_OUTPUTS_RDI0)
-			rc = vfe32_configure_pingpong_buffers(
-				VFE_MSG_START, VFE_MSG_OUTPUT_TERTIARY1,
-				vfe32_ctrl);
-		if (vfe32_ctrl->share_ctrl->operation_mode &
-				VFE_OUTPUTS_RDI1)
-			rc = vfe32_configure_pingpong_buffers(
-				VFE_MSG_START, VFE_MSG_OUTPUT_TERTIARY2,
-				vfe32_ctrl);
 
-		if (rc < 0) {
-			pr_err("%s error configuring pingpong buffers"
-				   " for preview", __func__);
-			rc = -EINVAL;
-			goto proc_general_done;
-		}
+		vfe32_ctrl->share_ctrl->current_mode =
+			vfe_params.operation_mode;
+
 		rc = vfe32_start(pmctl, vfe32_ctrl);
 		break;
 	case VFE_CMD_UPDATE:
@@ -1810,101 +1592,35 @@
 		break;
 	case VFE_CMD_CAPTURE_RAW:
 		pr_info("%s: cmdID = VFE_CMD_CAPTURE_RAW\n", __func__);
-		if (copy_from_user(&snapshot_cnt, (void __user *)(cmd->value),
-				sizeof(uint32_t))) {
-			rc = -EFAULT;
-			goto proc_general_done;
+		if (copy_from_user(&vfe_params,
+				(void __user *)(cmd->value),
+				sizeof(struct msm_camera_vfe_params_t))) {
+				rc = -EFAULT;
+				goto proc_general_done;
 		}
-		rc = vfe32_configure_pingpong_buffers(
-			VFE_MSG_CAPTURE, VFE_MSG_OUTPUT_PRIMARY,
-			vfe32_ctrl);
-		if (rc < 0) {
-			pr_err("%s error configuring pingpong buffers"
-				   " for snapshot", __func__);
-			rc = -EINVAL;
-			goto proc_general_done;
-		}
+
+		snapshot_cnt = vfe_params.capture_count;
+		vfe32_ctrl->share_ctrl->current_mode =
+			vfe_params.operation_mode;
 		rc = vfe32_capture_raw(pmctl, vfe32_ctrl, snapshot_cnt);
 		break;
 	case VFE_CMD_CAPTURE:
-		if (copy_from_user(&snapshot_cnt, (void __user *)(cmd->value),
-				sizeof(uint32_t))) {
-			rc = -EFAULT;
-			goto proc_general_done;
+		if (copy_from_user(&vfe_params,
+				(void __user *)(cmd->value),
+				sizeof(struct msm_camera_vfe_params_t))) {
+				rc = -EFAULT;
+				goto proc_general_done;
 		}
 
-		if (vfe32_ctrl->share_ctrl->operation_mode ==
-			VFE_OUTPUTS_JPEG_AND_THUMB ||
-		vfe32_ctrl->share_ctrl->operation_mode ==
-			VFE_OUTPUTS_THUMB_AND_JPEG) {
-			if (snapshot_cnt != 1) {
-				pr_err("only support 1 inline snapshot\n");
-				rc = -EINVAL;
-				goto proc_general_done;
-			}
-			/* Configure primary channel for JPEG */
-			rc = vfe32_configure_pingpong_buffers(
-				VFE_MSG_JPEG_CAPTURE,
-				VFE_MSG_OUTPUT_PRIMARY,
-				vfe32_ctrl);
-		} else {
-			/* Configure primary channel */
-			rc = vfe32_configure_pingpong_buffers(
-				VFE_MSG_CAPTURE,
-				VFE_MSG_OUTPUT_PRIMARY,
-				vfe32_ctrl);
-		}
-		if (rc < 0) {
-			pr_err("%s error configuring pingpong buffers"
-				   " for primary output", __func__);
-			rc = -EINVAL;
-			goto proc_general_done;
-		}
-		/* Configure secondary channel */
-		rc = vfe32_configure_pingpong_buffers(
-				VFE_MSG_CAPTURE, VFE_MSG_OUTPUT_SECONDARY,
-				vfe32_ctrl);
-		if (rc < 0) {
-			pr_err("%s error configuring pingpong buffers"
-				   " for secondary output", __func__);
-			rc = -EINVAL;
-			goto proc_general_done;
-		}
+		snapshot_cnt = vfe_params.capture_count;
+		vfe32_ctrl->share_ctrl->current_mode =
+			vfe_params.operation_mode;
+
 		rc = vfe32_capture(pmctl, snapshot_cnt, vfe32_ctrl);
 		break;
 	case VFE_CMD_START_RECORDING:
 		pr_info("vfe32_proc_general: cmdID = %s\n",
 			vfe32_general_cmd[cmd->id]);
-		if (copy_from_user(&temp1, (void __user *)(cmd->value),
-				sizeof(uint32_t))) {
-			pr_err("%s Error copying inst_handle for recording\n",
-				__func__);
-			rc = -EFAULT;
-			goto proc_general_done;
-		}
-		if (vfe32_ctrl->share_ctrl->operation_mode &
-			VFE_OUTPUTS_PREVIEW_AND_VIDEO) {
-			vfe32_ctrl->share_ctrl->outpath.out1.inst_handle =
-				temp1;
-			rc = vfe32_configure_pingpong_buffers(
-				VFE_MSG_START_RECORDING,
-				VFE_MSG_OUTPUT_SECONDARY,
-				vfe32_ctrl);
-		} else if (vfe32_ctrl->share_ctrl->operation_mode &
-			VFE_OUTPUTS_VIDEO_AND_PREVIEW) {
-			vfe32_ctrl->share_ctrl->outpath.out0.inst_handle =
-				temp1;
-			rc = vfe32_configure_pingpong_buffers(
-				VFE_MSG_START_RECORDING,
-				VFE_MSG_OUTPUT_PRIMARY,
-				vfe32_ctrl);
-		}
-		if (rc < 0) {
-			pr_err("%s error configuring pingpong buffers"
-				" for video", __func__);
-			rc = -EINVAL;
-			goto proc_general_done;
-		}
 		rc = vfe32_start_recording(pmctl, vfe32_ctrl);
 		break;
 	case VFE_CMD_STOP_RECORDING:
@@ -2446,23 +2162,7 @@
 		break;
 
 	case VFE_CMD_LIVESHOT:
-		if (copy_from_user(&temp1, (void __user *)(cmd->value),
-				sizeof(uint32_t))) {
-			pr_err("%s Error copying inst_handle for liveshot ",
-				__func__);
-			rc = -EFAULT;
-			goto proc_general_done;
-		}
-		vfe32_ctrl->share_ctrl->outpath.out0.inst_handle = temp1;
 		/* Configure primary channel */
-		rc = vfe32_configure_pingpong_buffers(VFE_MSG_CAPTURE,
-					VFE_MSG_OUTPUT_PRIMARY, vfe32_ctrl);
-		if (rc < 0) {
-			pr_err("%s error configuring pingpong buffers"
-				   " for primary output", __func__);
-			rc = -EINVAL;
-			goto proc_general_done;
-		}
 		vfe32_start_liveshot(pmctl, vfe32_ctrl);
 		break;
 
@@ -2943,6 +2643,15 @@
 	case VFE_CMD_STOP:
 		pr_info("vfe32_proc_general: cmdID = %s\n",
 			vfe32_general_cmd[cmd->id]);
+		if (copy_from_user(&vfe_params,
+				(void __user *)(cmd->value),
+				sizeof(struct msm_camera_vfe_params_t))) {
+				rc = -EFAULT;
+				goto proc_general_done;
+		}
+
+		vfe32_ctrl->share_ctrl->current_mode =
+			vfe_params.operation_mode;
 		vfe32_stop(vfe32_ctrl);
 		break;
 
@@ -2986,14 +2695,15 @@
 		break;
 
 	case VFE_CMD_ZSL:
-		rc = vfe32_configure_pingpong_buffers(VFE_MSG_START,
-			VFE_MSG_OUTPUT_PRIMARY, vfe32_ctrl);
-		if (rc < 0)
-			goto proc_general_done;
-		rc = vfe32_configure_pingpong_buffers(VFE_MSG_START,
-			VFE_MSG_OUTPUT_SECONDARY, vfe32_ctrl);
-		if (rc < 0)
-			goto proc_general_done;
+		if (copy_from_user(&vfe_params,
+				(void __user *)(cmd->value),
+				sizeof(struct msm_camera_vfe_params_t))) {
+				rc = -EFAULT;
+				goto proc_general_done;
+		}
+
+		vfe32_ctrl->share_ctrl->current_mode =
+			vfe_params.operation_mode;
 
 		rc = vfe32_zsl(pmctl, vfe32_ctrl);
 		break;
@@ -3258,12 +2968,6 @@
 		CDBG("%s Stopping liveshot ", __func__);
 		vfe32_stop_liveshot(pmctl, vfe32_ctrl);
 		break;
-	case VFE_CMD_RESET_2:
-		CDBG("vfe32_proc_general: cmdID = %s\n",
-			vfe32_general_cmd[cmd->id]);
-		vfe32_ctrl->is_reset_blocking = true;
-		vfe32_reset(vfe32_ctrl);
-		break;
 	default:
 		if (cmd->length != vfe32_cmd[cmd->id].length)
 			return -EINVAL;
@@ -3327,7 +3031,8 @@
 {
 	unsigned long flags;
 
-	if (vfe32_ctrl->recording_state == VFE_STATE_START_REQUESTED) {
+	if (vfe32_ctrl->share_ctrl->recording_state ==
+				VFE_STATE_START_REQUESTED) {
 		if (vfe32_ctrl->share_ctrl->operation_mode &
 				VFE_OUTPUTS_VIDEO_AND_PREVIEW) {
 			msm_camera_io_w(1, vfe32_ctrl->share_ctrl->vfebase +
@@ -3345,11 +3050,11 @@
 				vfe32_AXI_WM_CFG[vfe32_ctrl->
 				share_ctrl->outpath.out1.ch1]);
 		}
-		vfe32_ctrl->recording_state = VFE_STATE_STARTED;
+		vfe32_ctrl->share_ctrl->recording_state = VFE_STATE_STARTED;
 		msm_camera_io_w_mb(1,
 			vfe32_ctrl->share_ctrl->vfebase + VFE_REG_UPDATE_CMD);
 		CDBG("start video triggered .\n");
-	} else if (vfe32_ctrl->recording_state ==
+	} else if (vfe32_ctrl->share_ctrl->recording_state ==
 			VFE_STATE_STOP_REQUESTED) {
 		if (vfe32_ctrl->share_ctrl->operation_mode &
 				VFE_OUTPUTS_VIDEO_AND_PREVIEW) {
@@ -3371,40 +3076,47 @@
 		CDBG("stop video triggered .\n");
 	}
 
-	spin_lock_irqsave(&vfe32_ctrl->start_ack_lock, flags);
-	if (vfe32_ctrl->start_ack_pending == TRUE) {
-		vfe32_ctrl->start_ack_pending = FALSE;
-		spin_unlock_irqrestore(&vfe32_ctrl->start_ack_lock, flags);
+	spin_lock_irqsave(&vfe32_ctrl->share_ctrl->start_ack_lock, flags);
+	if (vfe32_ctrl->share_ctrl->start_ack_pending == TRUE) {
+		vfe32_ctrl->share_ctrl->start_ack_pending = FALSE;
+		spin_unlock_irqrestore(
+			&vfe32_ctrl->share_ctrl->start_ack_lock, flags);
 		vfe32_send_isp_msg(&vfe32_ctrl->subdev,
 			vfe32_ctrl->share_ctrl->vfeFrameId, MSG_ID_START_ACK);
 	} else {
-		spin_unlock_irqrestore(&vfe32_ctrl->start_ack_lock, flags);
-		if (vfe32_ctrl->recording_state ==
+		spin_unlock_irqrestore(
+			&vfe32_ctrl->share_ctrl->start_ack_lock, flags);
+		if (vfe32_ctrl->share_ctrl->recording_state ==
 				VFE_STATE_STOP_REQUESTED) {
-			vfe32_ctrl->recording_state = VFE_STATE_STOPPED;
+			vfe32_ctrl->share_ctrl->recording_state =
+						VFE_STATE_STOPPED;
 			/* request a reg update and send STOP_REC_ACK
 			 * when we process the next reg update irq.
 			 */
 			msm_camera_io_w_mb(1,
 			vfe32_ctrl->share_ctrl->vfebase + VFE_REG_UPDATE_CMD);
-		} else if (vfe32_ctrl->recording_state ==
+		} else if (vfe32_ctrl->share_ctrl->recording_state ==
 					VFE_STATE_STOPPED) {
 			vfe32_send_isp_msg(&vfe32_ctrl->subdev,
 				vfe32_ctrl->share_ctrl->vfeFrameId,
 				MSG_ID_STOP_REC_ACK);
-			vfe32_ctrl->recording_state = VFE_STATE_IDLE;
+			vfe32_ctrl->share_ctrl->recording_state =
+						VFE_STATE_IDLE;
 		}
-		spin_lock_irqsave(&vfe32_ctrl->update_ack_lock, flags);
-		if (vfe32_ctrl->update_ack_pending == TRUE) {
-			vfe32_ctrl->update_ack_pending = FALSE;
+		spin_lock_irqsave(
+			&vfe32_ctrl->share_ctrl->update_ack_lock, flags);
+		if (vfe32_ctrl->share_ctrl->update_ack_pending == TRUE) {
+			vfe32_ctrl->share_ctrl->update_ack_pending = FALSE;
 			spin_unlock_irqrestore(
-				&vfe32_ctrl->update_ack_lock, flags);
+				&vfe32_ctrl->share_ctrl->update_ack_lock,
+								flags);
 			vfe32_send_isp_msg(&vfe32_ctrl->subdev,
 				vfe32_ctrl->share_ctrl->vfeFrameId,
 				MSG_ID_UPDATE_ACK);
 		} else {
 			spin_unlock_irqrestore(
-				&vfe32_ctrl->update_ack_lock, flags);
+				&vfe32_ctrl->share_ctrl->update_ack_lock,
+								flags);
 		}
 	}
 
@@ -3466,13 +3178,13 @@
 		break;
 	}
 
-	if ((vfe32_ctrl->share_ctrl->operation_mode ==
+	if ((vfe32_ctrl->share_ctrl->operation_mode &
 			VFE_OUTPUTS_THUMB_AND_MAIN) ||
-		(vfe32_ctrl->share_ctrl->operation_mode ==
+		(vfe32_ctrl->share_ctrl->operation_mode &
 			VFE_OUTPUTS_MAIN_AND_THUMB) ||
-		(vfe32_ctrl->share_ctrl->operation_mode ==
+		(vfe32_ctrl->share_ctrl->operation_mode &
 			VFE_OUTPUTS_THUMB_AND_JPEG) ||
-		(vfe32_ctrl->share_ctrl->operation_mode ==
+		(vfe32_ctrl->share_ctrl->operation_mode &
 			VFE_OUTPUTS_JPEG_AND_THUMB)) {
 		/* in snapshot mode */
 		/* later we need to add check for live snapshot mode. */
@@ -3524,34 +3236,22 @@
 static void vfe32_process_rdi0_reg_update_irq(
 	struct vfe32_ctrl_type *vfe32_ctrl)
 {
-	unsigned long flags;
-	spin_lock_irqsave(&vfe32_ctrl->start_ack_lock, flags);
-	if (vfe32_ctrl->start_ack_pending == TRUE) {
-		vfe32_ctrl->start_ack_pending = FALSE;
-		spin_unlock_irqrestore(
-				&vfe32_ctrl->start_ack_lock, flags);
+	if (atomic_cmpxchg(
+		&vfe32_ctrl->share_ctrl->rdi0_update_ack_pending, 1, 0)) {
 		vfe32_send_isp_msg(&vfe32_ctrl->subdev,
-			vfe32_ctrl->share_ctrl->vfeFrameId, MSG_ID_START_ACK);
-	} else {
-		spin_unlock_irqrestore(
-				&vfe32_ctrl->start_ack_lock, flags);
+			vfe32_ctrl->share_ctrl->vfeFrameId,
+			MSG_ID_RDI0_UPDATE_ACK);
 	}
 }
 
 static void vfe32_process_rdi1_reg_update_irq(
 	struct vfe32_ctrl_type *vfe32_ctrl)
 {
-	unsigned long flags;
-	spin_lock_irqsave(&vfe32_ctrl->start_ack_lock, flags);
-	if (vfe32_ctrl->start_ack_pending == TRUE) {
-		vfe32_ctrl->start_ack_pending = FALSE;
-		spin_unlock_irqrestore(
-				&vfe32_ctrl->start_ack_lock, flags);
+	if (atomic_cmpxchg(
+		&vfe32_ctrl->share_ctrl->rdi1_update_ack_pending, 1, 0)) {
 		vfe32_send_isp_msg(&vfe32_ctrl->subdev,
-			vfe32_ctrl->share_ctrl->vfeFrameId, MSG_ID_START_ACK);
-	} else {
-		spin_unlock_irqrestore(
-				&vfe32_ctrl->start_ack_lock, flags);
+			vfe32_ctrl->share_ctrl->vfeFrameId,
+			MSG_ID_RDI1_UPDATE_ACK);
 	}
 }
 
@@ -3657,25 +3357,20 @@
 		/* reload all write masters. (frame & line)*/
 		msm_camera_io_w(0x7FFF,
 			vfe32_ctrl->share_ctrl->vfebase + VFE_BUS_CMD);
-		if (vfe32_ctrl->is_reset_blocking)
-			complete(&vfe32_ctrl->reset_complete);
-		else
-			vfe32_send_isp_msg(&vfe32_ctrl->subdev,
-				vfe32_ctrl->share_ctrl->vfeFrameId,
-				MSG_ID_RESET_ACK);
+		complete(&vfe32_ctrl->share_ctrl->reset_complete);
 	}
 }
 
 static void vfe32_process_camif_sof_irq(
 		struct vfe32_ctrl_type *vfe32_ctrl)
 {
-	if (vfe32_ctrl->share_ctrl->operation_mode ==
+	if (vfe32_ctrl->share_ctrl->operation_mode &
 		VFE_OUTPUTS_RAW) {
-		if (vfe32_ctrl->start_ack_pending) {
+		if (vfe32_ctrl->share_ctrl->start_ack_pending) {
 			vfe32_send_isp_msg(&vfe32_ctrl->subdev,
 				vfe32_ctrl->share_ctrl->vfeFrameId,
 				MSG_ID_START_ACK);
-			vfe32_ctrl->start_ack_pending = FALSE;
+			vfe32_ctrl->share_ctrl->start_ack_pending = FALSE;
 		}
 		vfe32_ctrl->share_ctrl->vfe_capture_count--;
 		/* if last frame to be captured: */
@@ -3691,11 +3386,14 @@
 			VFE_MODE_OF_OPERATION_VIDEO) &&
 		(vfe32_ctrl->share_ctrl->vfeFrameId %
 			vfe32_ctrl->hfr_mode != 0)) {
-		vfe32_ctrl->share_ctrl->vfeFrameId++;
+		if (vfe32_ctrl->vfe_sof_count_enable)
+			vfe32_ctrl->share_ctrl->vfeFrameId++;
 		CDBG("Skip the SOF notification when HFR enabled\n");
 		return;
 	}
-	vfe32_ctrl->share_ctrl->vfeFrameId++;
+	if (vfe32_ctrl->vfe_sof_count_enable)
+		vfe32_ctrl->share_ctrl->vfeFrameId++;
+
 	vfe32_send_isp_msg(&vfe32_ctrl->subdev,
 		vfe32_ctrl->share_ctrl->vfeFrameId, MSG_ID_SOF_ACK);
 	CDBG("camif_sof_irq, frameId = %d\n",
@@ -4521,7 +4219,6 @@
 	struct vfe32_ctrl_type *vfe32_ctrl, uint32_t irqstatus)
 {
 	uint32_t status_bits = VFE_COM_STATUS & irqstatus;
-
 	if ((vfe32_ctrl->hfr_mode != HFR_MODE_OFF) &&
 		(vfe32_ctrl->share_ctrl->vfeFrameId %
 		 vfe32_ctrl->hfr_mode != 0)) {
@@ -4692,6 +4389,11 @@
 				NOTIFY_VFE_IRQ,
 				(void *)VFE_IMASK_WHILE_STOPPING_1);
 
+		if (atomic_read(&axi_ctrl->share_ctrl->handle_axi_irq))
+			v4l2_subdev_notify(&axi_ctrl->subdev,
+				NOTIFY_AXI_IRQ,
+				(void *)qcmd->vfeInterruptStatus0);
+
 		if (atomic_read(&axi_ctrl->share_ctrl->vstate)) {
 			if (qcmd->vfeInterruptStatus1 &
 					VFE32_IMASK_ERROR_ONLY_1) {
@@ -4701,9 +4403,6 @@
 					qcmd->vfeInterruptStatus1 &
 					VFE32_IMASK_ERROR_ONLY_1);
 			}
-			v4l2_subdev_notify(&axi_ctrl->subdev,
-				NOTIFY_AXI_IRQ,
-				(void *)qcmd->vfeInterruptStatus0);
 
 			/* then process stats irq. */
 			if (axi_ctrl->share_ctrl->stats_comp) {
@@ -4993,7 +4692,9 @@
 		cmd->cmd_type != CMD_STATS_AF_BUF_RELEASE &&
 		cmd->cmd_type != CMD_STATS_BG_BUF_RELEASE &&
 		cmd->cmd_type != CMD_STATS_BF_BUF_RELEASE &&
-		cmd->cmd_type != CMD_STATS_BHIST_BUF_RELEASE) {
+		cmd->cmd_type != CMD_STATS_BHIST_BUF_RELEASE &&
+		cmd->cmd_type != CMD_VFE_SOF_COUNT_UPDATE &&
+		cmd->cmd_type != CMD_VFE_COUNT_SOF_ENABLE) {
 			if (copy_from_user(&vfecmd,
 					(void __user *)(cmd->value),
 					sizeof(vfecmd))) {
@@ -5073,6 +4774,19 @@
 	case CMD_GENERAL:
 		rc = vfe32_proc_general(pmctl, &vfecmd, vfe32_ctrl);
 	break;
+	case CMD_VFE_COUNT_SOF_ENABLE: {
+		int enable = *((int *)cmd->value);
+		if (enable)
+			vfe32_ctrl->vfe_sof_count_enable = TRUE;
+		else
+			vfe32_ctrl->vfe_sof_count_enable = FALSE;
+	}
+	break;
+	case CMD_VFE_SOF_COUNT_UPDATE:
+		if (!vfe32_ctrl->vfe_sof_count_enable)
+			vfe32_ctrl->share_ctrl->vfeFrameId =
+			*((uint32_t *)vfe_params->data);
+	break;
 	case CMD_CONFIG_PING_ADDR: {
 		int path = *((int *)cmd->value);
 		struct vfe32_output_ch *outch =
@@ -5219,16 +4933,17 @@
 		(struct vfe32_ctrl_type *)v4l2_get_subdevdata(sd);
 
 	spin_lock_init(&vfe32_ctrl->share_ctrl->stop_flag_lock);
+	spin_lock_init(&vfe32_ctrl->share_ctrl->abort_lock);
 	spin_lock_init(&vfe32_ctrl->state_lock);
-	spin_lock_init(&vfe32_ctrl->io_lock);
-	spin_lock_init(&vfe32_ctrl->update_ack_lock);
-	spin_lock_init(&vfe32_ctrl->start_ack_lock);
+	spin_lock_init(&vfe32_ctrl->share_ctrl->update_ack_lock);
+	spin_lock_init(&vfe32_ctrl->share_ctrl->start_ack_lock);
 	spin_lock_init(&vfe32_ctrl->stats_bufq_lock);
 
 	vfe32_ctrl->update_linear = false;
 	vfe32_ctrl->update_rolloff = false;
 	vfe32_ctrl->update_la = false;
 	vfe32_ctrl->update_gamma = false;
+	vfe32_ctrl->vfe_sof_count_enable = false;
 	vfe32_ctrl->hfr_mode = HFR_MODE_OFF;
 
 	memset(&vfe32_ctrl->stats_ctrl, 0, sizeof(struct msm_stats_bufq_ctrl));
@@ -5273,15 +4988,323 @@
 		vfe32_ctrl->share_ctrl->vfebase = NULL;
 }
 
-void axi_start(struct axi_ctrl_t *axi_ctrl)
+void axi_abort(struct axi_ctrl_t *axi_ctrl)
 {
-	uint16_t operation_mode =
+	uint8_t  axi_busy_flag = true;
+	/* axi halt command. */
+	msm_camera_io_w(AXI_HALT,
+		axi_ctrl->share_ctrl->vfebase + VFE_AXI_CMD);
+	wmb();
+	while (axi_busy_flag) {
+		if (msm_camera_io_r(
+			axi_ctrl->share_ctrl->vfebase + VFE_AXI_STATUS) & 0x1)
+			axi_busy_flag = false;
+	}
+	/* Ensure the write order while writing
+	 * to the command register using the barrier */
+	msm_camera_io_w_mb(AXI_HALT_CLEAR,
+		axi_ctrl->share_ctrl->vfebase + VFE_AXI_CMD);
+
+	/* After the AXI halt it is safe to apply a global reset.
+	 * Enable reset_ack and the async timer interrupt only while
+	 * stopping the pipeline. */
+	msm_camera_io_w(0xf0000000,
+		axi_ctrl->share_ctrl->vfebase + VFE_IRQ_MASK_0);
+	msm_camera_io_w(VFE_IMASK_WHILE_STOPPING_1,
+		axi_ctrl->share_ctrl->vfebase + VFE_IRQ_MASK_1);
+
+	/* Ensure the write order while writing
+	 * to the command register using the barrier */
+	msm_camera_io_w_mb(VFE_RESET_UPON_STOP_CMD,
+		axi_ctrl->share_ctrl->vfebase + VFE_GLOBAL_RESET);
+}
+
+int axi_config_buffers(struct axi_ctrl_t *axi_ctrl,
+	struct msm_camera_vfe_params_t vfe_params)
+{
+	uint16_t vfe_mode = axi_ctrl->share_ctrl->current_mode
+			& ~(VFE_OUTPUTS_RDI0|VFE_OUTPUTS_RDI1);
+	int rc = 0;
+	switch (vfe_params.cmd_type) {
+	case AXI_CMD_PREVIEW:
+		if (vfe_mode) {
+			if ((axi_ctrl->share_ctrl->current_mode &
+				VFE_OUTPUTS_PREVIEW_AND_VIDEO) ||
+				(axi_ctrl->share_ctrl->current_mode &
+				VFE_OUTPUTS_PREVIEW))
+				/* Configure primary channel */
+				rc = configure_pingpong_buffers(
+					VFE_MSG_START,
+					VFE_MSG_OUTPUT_PRIMARY,
+					axi_ctrl);
+			else
+			/* Configure secondary channel */
+				rc = configure_pingpong_buffers(
+					VFE_MSG_START,
+					VFE_MSG_OUTPUT_SECONDARY,
+					axi_ctrl);
+		}
+		if (axi_ctrl->share_ctrl->current_mode &
+				VFE_OUTPUTS_RDI0)
+			rc = configure_pingpong_buffers(
+				VFE_MSG_START, VFE_MSG_OUTPUT_TERTIARY1,
+				axi_ctrl);
+		if (axi_ctrl->share_ctrl->current_mode &
+				VFE_OUTPUTS_RDI1)
+			rc = configure_pingpong_buffers(
+				VFE_MSG_START, VFE_MSG_OUTPUT_TERTIARY2,
+				axi_ctrl);
+
+		if (rc < 0) {
+			pr_err("%s error configuring pingpong buffers for preview",
+				__func__);
+			rc = -EINVAL;
+			goto config_done;
+		}
+		break;
+	case AXI_CMD_RAW_CAPTURE:
+		rc = configure_pingpong_buffers(
+			VFE_MSG_CAPTURE, VFE_MSG_OUTPUT_PRIMARY,
+			axi_ctrl);
+		if (rc < 0) {
+			pr_err("%s error configuring pingpong buffers for snapshot",
+				__func__);
+			rc = -EINVAL;
+			goto config_done;
+		}
+		break;
+	case AXI_CMD_ZSL:
+		rc = configure_pingpong_buffers(VFE_MSG_START,
+			VFE_MSG_OUTPUT_PRIMARY, axi_ctrl);
+		if (rc < 0)
+			goto config_done;
+		rc = configure_pingpong_buffers(VFE_MSG_START,
+			VFE_MSG_OUTPUT_SECONDARY, axi_ctrl);
+		if (rc < 0)
+			goto config_done;
+		break;
+	case AXI_CMD_RECORD:
+		if (axi_ctrl->share_ctrl->current_mode &
+			VFE_OUTPUTS_PREVIEW_AND_VIDEO) {
+			axi_ctrl->share_ctrl->outpath.out1.inst_handle =
+				vfe_params.inst_handle;
+			rc = configure_pingpong_buffers(
+				VFE_MSG_START_RECORDING,
+				VFE_MSG_OUTPUT_SECONDARY,
+				axi_ctrl);
+		} else if (axi_ctrl->share_ctrl->current_mode &
+			VFE_OUTPUTS_VIDEO_AND_PREVIEW) {
+			axi_ctrl->share_ctrl->outpath.out0.inst_handle =
+				vfe_params.inst_handle;
+			rc = configure_pingpong_buffers(
+				VFE_MSG_START_RECORDING,
+				VFE_MSG_OUTPUT_PRIMARY,
+				axi_ctrl);
+		}
+		if (rc < 0) {
+			pr_err("%s error configuring pingpong buffers for video",
+				__func__);
+			rc = -EINVAL;
+			goto config_done;
+		}
+		break;
+	case AXI_CMD_LIVESHOT:
+		axi_ctrl->share_ctrl->outpath.out0.inst_handle =
+			vfe_params.inst_handle;
+		rc = configure_pingpong_buffers(VFE_MSG_CAPTURE,
+					VFE_MSG_OUTPUT_PRIMARY, axi_ctrl);
+		if (rc < 0) {
+			pr_err("%s error configuring pingpong buffers for primary output",
+				__func__);
+			rc = -EINVAL;
+			goto config_done;
+		}
+		break;
+	case AXI_CMD_CAPTURE:
+		if (axi_ctrl->share_ctrl->current_mode ==
+			VFE_OUTPUTS_JPEG_AND_THUMB ||
+		axi_ctrl->share_ctrl->current_mode ==
+			VFE_OUTPUTS_THUMB_AND_JPEG) {
+
+			/* Configure primary channel for JPEG */
+			rc = configure_pingpong_buffers(
+				VFE_MSG_JPEG_CAPTURE,
+				VFE_MSG_OUTPUT_PRIMARY,
+				axi_ctrl);
+		} else {
+			/* Configure primary channel */
+			rc = configure_pingpong_buffers(
+				VFE_MSG_CAPTURE,
+				VFE_MSG_OUTPUT_PRIMARY,
+				axi_ctrl);
+		}
+		if (rc < 0) {
+			pr_err("%s error configuring pingpong buffers for primary output",
+				__func__);
+			rc = -EINVAL;
+			goto config_done;
+		}
+		/* Configure secondary channel */
+		rc = configure_pingpong_buffers(
+				VFE_MSG_CAPTURE, VFE_MSG_OUTPUT_SECONDARY,
+				axi_ctrl);
+		if (rc < 0) {
+			pr_err("%s error configuring pingpong buffers for secondary output",
+				__func__);
+			rc = -EINVAL;
+			goto config_done;
+		}
+		break;
+	default:
+		rc = -EINVAL;
+		break;
+
+	}
+config_done:
+	return rc;
+}
+
+void axi_start(struct msm_cam_media_controller *pmctl,
+	struct axi_ctrl_t *axi_ctrl, struct msm_camera_vfe_params_t vfe_params)
+{
+	uint32_t irq_comp_mask = 0, irq_mask = 0;
+	int rc = 0;
+	rc = axi_config_buffers(axi_ctrl, vfe_params);
+	if (rc < 0)
+		return;
+
+	switch (vfe_params.cmd_type) {
+	case AXI_CMD_PREVIEW:
+		msm_camio_bus_scale_cfg(
+		pmctl->sdata->pdata->cam_bus_scale_table, S_PREVIEW);
+		break;
+	case AXI_CMD_CAPTURE:
+	case AXI_CMD_RAW_CAPTURE:
+		msm_camio_bus_scale_cfg(
+		pmctl->sdata->pdata->cam_bus_scale_table, S_CAPTURE);
+		break;
+	case AXI_CMD_RECORD:
+		msm_camio_bus_scale_cfg(
+		pmctl->sdata->pdata->cam_bus_scale_table, S_VIDEO);
+		return;
+	case AXI_CMD_ZSL:
+		msm_camio_bus_scale_cfg(
+		pmctl->sdata->pdata->cam_bus_scale_table, S_ZSL);
+		break;
+	case AXI_CMD_LIVESHOT:
+		msm_camio_bus_scale_cfg(
+		pmctl->sdata->pdata->cam_bus_scale_table, S_LIVESHOT);
+		return;
+	default:
+		return;
+	}
+
+	irq_comp_mask =
+		msm_camera_io_r(axi_ctrl->share_ctrl->vfebase +
+			VFE_IRQ_COMP_MASK);
+
+	if (axi_ctrl->share_ctrl->outpath.output_mode &
+			VFE32_OUTPUT_MODE_PRIMARY) {
+		irq_comp_mask |= (
+			0x1 << axi_ctrl->share_ctrl->outpath.out0.ch0 |
+			0x1 << axi_ctrl->share_ctrl->outpath.out0.ch1);
+	} else if (axi_ctrl->share_ctrl->outpath.output_mode &
+			   VFE32_OUTPUT_MODE_PRIMARY_ALL_CHNLS) {
+		irq_comp_mask |= (
+			0x1 << axi_ctrl->share_ctrl->outpath.out0.ch0 |
+			0x1 << axi_ctrl->share_ctrl->outpath.out0.ch1 |
+			0x1 << axi_ctrl->share_ctrl->outpath.out0.ch2);
+	}
+	if (axi_ctrl->share_ctrl->outpath.output_mode &
+			VFE32_OUTPUT_MODE_SECONDARY) {
+		irq_comp_mask |= (
+			0x1 << (axi_ctrl->share_ctrl->outpath.out1.ch0 + 8) |
+			0x1 << (axi_ctrl->share_ctrl->outpath.out1.ch1 + 8));
+	} else if (axi_ctrl->share_ctrl->outpath.output_mode &
+			VFE32_OUTPUT_MODE_SECONDARY_ALL_CHNLS) {
+		irq_comp_mask |= (
+			0x1 << (axi_ctrl->share_ctrl->outpath.out1.ch0 + 8) |
+			0x1 << (axi_ctrl->share_ctrl->outpath.out1.ch1 + 8) |
+			0x1 << (axi_ctrl->share_ctrl->outpath.out1.ch2 + 8));
+	}
+	if (axi_ctrl->share_ctrl->outpath.output_mode &
+		VFE32_OUTPUT_MODE_TERTIARY1) {
+		irq_mask = msm_camera_io_r(axi_ctrl->share_ctrl->vfebase +
+			VFE_IRQ_MASK_0);
+		irq_mask |= (0x1 << (axi_ctrl->share_ctrl->outpath.out2.ch0 +
+			VFE_WM_OFFSET));
+		msm_camera_io_w(irq_mask, axi_ctrl->share_ctrl->vfebase +
+			VFE_IRQ_MASK_0);
+	}
+	if (axi_ctrl->share_ctrl->outpath.output_mode &
+		VFE32_OUTPUT_MODE_TERTIARY2) {
+		irq_mask = msm_camera_io_r(axi_ctrl->share_ctrl->vfebase +
+			VFE_IRQ_MASK_0);
+		irq_mask |= (0x1 << (axi_ctrl->share_ctrl->outpath.out3.ch0 +
+			VFE_WM_OFFSET));
+		msm_camera_io_w(irq_mask, axi_ctrl->share_ctrl->vfebase +
+			VFE_IRQ_MASK_0);
+	}
+
+	msm_camera_io_w(irq_comp_mask,
+		axi_ctrl->share_ctrl->vfebase + VFE_IRQ_COMP_MASK);
+
+	switch (vfe_params.cmd_type) {
+	case AXI_CMD_PREVIEW: {
+		uint16_t operation_mode =
 		(axi_ctrl->share_ctrl->operation_mode &
 		~(VFE_OUTPUTS_RDI0|VFE_OUTPUTS_RDI1));
 
-	switch (operation_mode) {
-	case VFE_OUTPUTS_PREVIEW:
-	case VFE_OUTPUTS_PREVIEW_AND_VIDEO:
+		switch (operation_mode) {
+		case VFE_OUTPUTS_PREVIEW:
+		case VFE_OUTPUTS_PREVIEW_AND_VIDEO:
+			if (axi_ctrl->share_ctrl->outpath.output_mode &
+				VFE32_OUTPUT_MODE_PRIMARY) {
+				msm_camera_io_w(1, axi_ctrl->share_ctrl->vfebase
+					+ vfe32_AXI_WM_CFG[axi_ctrl->
+					share_ctrl->outpath.out0.ch0]);
+				msm_camera_io_w(1, axi_ctrl->share_ctrl->vfebase
+					+ vfe32_AXI_WM_CFG[axi_ctrl->
+					share_ctrl->outpath.out0.ch1]);
+			} else if (axi_ctrl->share_ctrl->outpath.output_mode &
+					VFE32_OUTPUT_MODE_PRIMARY_ALL_CHNLS) {
+				msm_camera_io_w(1, axi_ctrl->share_ctrl->vfebase
+					+ vfe32_AXI_WM_CFG[axi_ctrl->
+					share_ctrl->outpath.out0.ch0]);
+				msm_camera_io_w(1, axi_ctrl->share_ctrl->vfebase
+					+ vfe32_AXI_WM_CFG[axi_ctrl->
+					share_ctrl->outpath.out0.ch1]);
+				msm_camera_io_w(1, axi_ctrl->share_ctrl->vfebase
+					+ vfe32_AXI_WM_CFG[axi_ctrl->
+					share_ctrl->outpath.out0.ch2]);
+			}
+			break;
+		default:
+			if (axi_ctrl->share_ctrl->outpath.output_mode &
+				VFE32_OUTPUT_MODE_SECONDARY) {
+				msm_camera_io_w(1, axi_ctrl->share_ctrl->vfebase
+					+ vfe32_AXI_WM_CFG[axi_ctrl->
+					share_ctrl->outpath.out1.ch0]);
+				msm_camera_io_w(1, axi_ctrl->share_ctrl->vfebase
+					+ vfe32_AXI_WM_CFG[axi_ctrl->
+					share_ctrl->outpath.out1.ch1]);
+			} else if (axi_ctrl->share_ctrl->outpath.output_mode &
+				VFE32_OUTPUT_MODE_SECONDARY_ALL_CHNLS) {
+				msm_camera_io_w(1, axi_ctrl->share_ctrl->vfebase
+					+ vfe32_AXI_WM_CFG[axi_ctrl->
+					share_ctrl->outpath.out1.ch0]);
+				msm_camera_io_w(1, axi_ctrl->share_ctrl->vfebase
+					+ vfe32_AXI_WM_CFG[axi_ctrl->
+					share_ctrl->outpath.out1.ch1]);
+				msm_camera_io_w(1, axi_ctrl->share_ctrl->vfebase
+					+ vfe32_AXI_WM_CFG[axi_ctrl->
+					share_ctrl->outpath.out1.ch2]);
+			}
+			break;
+			}
+		}
+		break;
+	default:
 		if (axi_ctrl->share_ctrl->outpath.output_mode &
 			VFE32_OUTPUT_MODE_PRIMARY) {
 			msm_camera_io_w(1, axi_ctrl->share_ctrl->vfebase +
@@ -5302,8 +5325,7 @@
 				vfe32_AXI_WM_CFG[axi_ctrl->
 				share_ctrl->outpath.out0.ch2]);
 		}
-		break;
-	default:
+
 		if (axi_ctrl->share_ctrl->outpath.output_mode &
 			VFE32_OUTPUT_MODE_SECONDARY) {
 			msm_camera_io_w(1, axi_ctrl->share_ctrl->vfebase +
@@ -5326,47 +5348,168 @@
 		}
 		break;
 	}
-
-	if (axi_ctrl->share_ctrl->operation_mode & VFE_OUTPUTS_RDI0)
+	if (axi_ctrl->share_ctrl->current_mode & VFE_OUTPUTS_RDI0)
 		msm_camera_io_w(1, axi_ctrl->share_ctrl->vfebase +
 			vfe32_AXI_WM_CFG[axi_ctrl->share_ctrl->
 			outpath.out2.ch0]);
-	if (axi_ctrl->share_ctrl->operation_mode & VFE_OUTPUTS_RDI1)
+	if (axi_ctrl->share_ctrl->current_mode & VFE_OUTPUTS_RDI1)
 		msm_camera_io_w(1, axi_ctrl->share_ctrl->vfebase +
 			vfe32_AXI_WM_CFG[axi_ctrl->share_ctrl->
 			outpath.out3.ch0]);
-
+	atomic_set(&axi_ctrl->share_ctrl->handle_axi_irq, 1);
 }
 
-void axi_stop(struct axi_ctrl_t *axi_ctrl)
+void axi_stop(struct msm_cam_media_controller *pmctl,
+	struct axi_ctrl_t *axi_ctrl, struct msm_camera_vfe_params_t vfe_params)
 {
-	uint8_t  axiBusyFlag = true;
-	/* axi halt command. */
-	msm_camera_io_w(AXI_HALT,
-		axi_ctrl->share_ctrl->vfebase + VFE_AXI_CMD);
-	wmb();
-	while (axiBusyFlag) {
-		if (msm_camera_io_r(
-			axi_ctrl->share_ctrl->vfebase + VFE_AXI_STATUS) & 0x1)
-			axiBusyFlag = false;
+	uint32_t reg_update = 0;
+	unsigned long flags;
+	uint32_t operation_mode =
+	axi_ctrl->share_ctrl->current_mode & ~(VFE_OUTPUTS_RDI0|
+		VFE_OUTPUTS_RDI1);
+
+	switch (vfe_params.cmd_type) {
+	case AXI_CMD_PREVIEW:
+	case AXI_CMD_CAPTURE:
+	case AXI_CMD_RAW_CAPTURE:
+	case AXI_CMD_ZSL:
+		break;
+	case AXI_CMD_RECORD:
+		msm_camio_bus_scale_cfg(
+		pmctl->sdata->pdata->cam_bus_scale_table, S_PREVIEW);
+		return;
+	case AXI_CMD_LIVESHOT:
+		msm_camio_bus_scale_cfg(
+		pmctl->sdata->pdata->cam_bus_scale_table, S_VIDEO);
+		return;
+	default:
+		return;
 	}
-	/* Ensure the write order while writing
-	to the command register using the barrier */
-	msm_camera_io_w_mb(AXI_HALT_CLEAR,
-		axi_ctrl->share_ctrl->vfebase + VFE_AXI_CMD);
 
-	/* after axi halt, then ok to apply global reset. */
-	/* enable reset_ack and async timer interrupt only while
-	stopping the pipeline.*/
-	msm_camera_io_w(0xf0000000,
-		axi_ctrl->share_ctrl->vfebase + VFE_IRQ_MASK_0);
-	msm_camera_io_w(VFE_IMASK_WHILE_STOPPING_1,
-		axi_ctrl->share_ctrl->vfebase + VFE_IRQ_MASK_1);
+	if (!axi_ctrl->share_ctrl->skip_abort) {
+		atomic_set(&axi_ctrl->share_ctrl->handle_axi_irq, 0);
+		axi_disable_irq(axi_ctrl);
+	}
 
-	/* Ensure the write order while writing
-	to the command register using the barrier */
-	msm_camera_io_w_mb(VFE_RESET_UPON_STOP_CMD,
-		axi_ctrl->share_ctrl->vfebase + VFE_GLOBAL_RESET);
+	spin_lock_irqsave(&axi_ctrl->share_ctrl->stop_flag_lock, flags);
+	axi_ctrl->share_ctrl->stop_ack_pending  = TRUE;
+	spin_unlock_irqrestore(&axi_ctrl->share_ctrl->stop_flag_lock, flags);
+	switch (vfe_params.cmd_type) {
+	case AXI_CMD_PREVIEW: {
+		switch (operation_mode) {
+		case VFE_OUTPUTS_PREVIEW:
+		case VFE_OUTPUTS_PREVIEW_AND_VIDEO:
+			if (axi_ctrl->share_ctrl->outpath.output_mode &
+				VFE32_OUTPUT_MODE_PRIMARY) {
+				msm_camera_io_w(0, axi_ctrl->share_ctrl->vfebase
+					+ vfe32_AXI_WM_CFG[axi_ctrl->
+					share_ctrl->outpath.out0.ch0]);
+				msm_camera_io_w(0, axi_ctrl->share_ctrl->vfebase
+					+ vfe32_AXI_WM_CFG[axi_ctrl->
+					share_ctrl->outpath.out0.ch1]);
+			} else if (axi_ctrl->share_ctrl->outpath.output_mode &
+					VFE32_OUTPUT_MODE_PRIMARY_ALL_CHNLS) {
+				msm_camera_io_w(0, axi_ctrl->share_ctrl->vfebase
+					+ vfe32_AXI_WM_CFG[axi_ctrl->
+					share_ctrl->outpath.out0.ch0]);
+				msm_camera_io_w(0, axi_ctrl->share_ctrl->vfebase
+					+ vfe32_AXI_WM_CFG[axi_ctrl->
+					share_ctrl->outpath.out0.ch1]);
+				msm_camera_io_w(0, axi_ctrl->share_ctrl->vfebase
+					+ vfe32_AXI_WM_CFG[axi_ctrl->
+					share_ctrl->outpath.out0.ch2]);
+			}
+			break;
+		default:
+			if (axi_ctrl->share_ctrl->outpath.output_mode &
+				VFE32_OUTPUT_MODE_SECONDARY) {
+				msm_camera_io_w(0, axi_ctrl->share_ctrl->vfebase
+					+ vfe32_AXI_WM_CFG[axi_ctrl->
+					share_ctrl->outpath.out1.ch0]);
+				msm_camera_io_w(0, axi_ctrl->share_ctrl->vfebase
+					+ vfe32_AXI_WM_CFG[axi_ctrl->
+					share_ctrl->outpath.out1.ch1]);
+			} else if (axi_ctrl->share_ctrl->outpath.output_mode &
+				VFE32_OUTPUT_MODE_SECONDARY_ALL_CHNLS) {
+				msm_camera_io_w(0, axi_ctrl->share_ctrl->vfebase
+					+ vfe32_AXI_WM_CFG[axi_ctrl->
+					share_ctrl->outpath.out1.ch0]);
+				msm_camera_io_w(0, axi_ctrl->share_ctrl->vfebase
+					+ vfe32_AXI_WM_CFG[axi_ctrl->
+					share_ctrl->outpath.out1.ch1]);
+				msm_camera_io_w(0, axi_ctrl->share_ctrl->vfebase
+					+ vfe32_AXI_WM_CFG[axi_ctrl->
+					share_ctrl->outpath.out1.ch2]);
+			}
+			break;
+			}
+		}
+		break;
+	default:
+		if (axi_ctrl->share_ctrl->outpath.output_mode &
+			VFE32_OUTPUT_MODE_PRIMARY) {
+			msm_camera_io_w(0, axi_ctrl->share_ctrl->vfebase +
+				vfe32_AXI_WM_CFG[axi_ctrl->
+				share_ctrl->outpath.out0.ch0]);
+			msm_camera_io_w(0, axi_ctrl->share_ctrl->vfebase +
+				vfe32_AXI_WM_CFG[axi_ctrl->
+				share_ctrl->outpath.out0.ch1]);
+		} else if (axi_ctrl->share_ctrl->outpath.output_mode &
+				VFE32_OUTPUT_MODE_PRIMARY_ALL_CHNLS) {
+			msm_camera_io_w(0, axi_ctrl->share_ctrl->vfebase +
+				vfe32_AXI_WM_CFG[axi_ctrl->
+				share_ctrl->outpath.out0.ch0]);
+			msm_camera_io_w(0, axi_ctrl->share_ctrl->vfebase +
+				vfe32_AXI_WM_CFG[axi_ctrl->
+				share_ctrl->outpath.out0.ch1]);
+			msm_camera_io_w(0, axi_ctrl->share_ctrl->vfebase +
+				vfe32_AXI_WM_CFG[axi_ctrl->
+				share_ctrl->outpath.out0.ch2]);
+		}
+
+		if (axi_ctrl->share_ctrl->outpath.output_mode &
+			VFE32_OUTPUT_MODE_SECONDARY) {
+			msm_camera_io_w(0, axi_ctrl->share_ctrl->vfebase +
+				vfe32_AXI_WM_CFG[axi_ctrl->
+				share_ctrl->outpath.out1.ch0]);
+			msm_camera_io_w(0, axi_ctrl->share_ctrl->vfebase +
+				vfe32_AXI_WM_CFG[axi_ctrl->
+				share_ctrl->outpath.out1.ch1]);
+		} else if (axi_ctrl->share_ctrl->outpath.output_mode &
+			VFE32_OUTPUT_MODE_SECONDARY_ALL_CHNLS) {
+			msm_camera_io_w(0, axi_ctrl->share_ctrl->vfebase +
+				vfe32_AXI_WM_CFG[axi_ctrl->
+				share_ctrl->outpath.out1.ch0]);
+			msm_camera_io_w(0, axi_ctrl->share_ctrl->vfebase +
+				vfe32_AXI_WM_CFG[axi_ctrl->
+				share_ctrl->outpath.out1.ch1]);
+			msm_camera_io_w(0, axi_ctrl->share_ctrl->vfebase +
+				vfe32_AXI_WM_CFG[axi_ctrl->
+				share_ctrl->outpath.out1.ch2]);
+		}
+		break;
+	}
+	if (axi_ctrl->share_ctrl->current_mode & VFE_OUTPUTS_RDI0)
+		msm_camera_io_w(0, axi_ctrl->share_ctrl->vfebase +
+			vfe32_AXI_WM_CFG[axi_ctrl->share_ctrl->
+			outpath.out2.ch0]);
+	if (axi_ctrl->share_ctrl->current_mode & VFE_OUTPUTS_RDI1)
+		msm_camera_io_w(0, axi_ctrl->share_ctrl->vfebase +
+			vfe32_AXI_WM_CFG[axi_ctrl->share_ctrl->
+			outpath.out3.ch0]);
+
+	if (axi_ctrl->share_ctrl->current_mode & VFE_OUTPUTS_RDI0)
+		reg_update |= 0x2;
+	if (axi_ctrl->share_ctrl->current_mode & VFE_OUTPUTS_RDI1)
+		reg_update |= 0x4;
+
+	if (operation_mode)
+		reg_update |= 0x1;
+	msm_camera_io_w_mb(reg_update,
+		axi_ctrl->share_ctrl->vfebase + VFE_REG_UPDATE_CMD);
+	if (!axi_ctrl->share_ctrl->skip_abort)
+		axi_abort(axi_ctrl);
+
 }
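
The CMD_AXI_START and CMD_AXI_STOP handlers below, like the VFE_CMD_START/CAPTURE/STOP/ZSL cases earlier in this patch, copy a struct msm_camera_vfe_params_t from user space and then latch operation_mode (and skip_abort, under abort_lock) into the shared control block. The following is a minimal sketch of that copy-then-latch sequence only; handle_axi_params() is a hypothetical helper name, not something added by this patch, and it assumes the driver's own headers (msm_vfe32.h, linux/uaccess.h, linux/spinlock.h) are in scope.

/* Sketch only: a hypothetical condensation of the copy-then-latch
 * pattern used by the CMD_AXI_* and VFE_CMD_* handlers in this patch. */
static int handle_axi_params(struct axi_ctrl_t *axi_ctrl, void __user *arg,
	struct msm_camera_vfe_params_t *vfe_params)
{
	unsigned long flags;

	/* Copy the whole parameter block before touching shared state. */
	if (copy_from_user(vfe_params, arg, sizeof(*vfe_params)))
		return -EFAULT;

	/* Latch the requested mode; only skip_abort is also read from the
	 * IRQ path, so only that store is taken under abort_lock. */
	axi_ctrl->share_ctrl->current_mode = vfe_params->operation_mode;
	spin_lock_irqsave(&axi_ctrl->share_ctrl->abort_lock, flags);
	axi_ctrl->share_ctrl->skip_abort = vfe_params->skip_abort;
	spin_unlock_irqrestore(&axi_ctrl->share_ctrl->abort_lock, flags);
	return 0;
}

With a helper like this, the two CMD_AXI_* cases below would differ only in whether they call axi_start() or axi_stop() afterwards.
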
 
 static int msm_axi_config(struct v4l2_subdev *sd, void __user *arg)
@@ -5374,7 +5517,10 @@
 	struct msm_vfe_cfg_cmd cfgcmd;
 	struct msm_isp_cmd vfecmd;
 	struct axi_ctrl_t *axi_ctrl = v4l2_get_subdevdata(sd);
+	struct msm_cam_media_controller *pmctl =
+		(struct msm_cam_media_controller *)v4l2_get_subdev_hostdata(sd);
 	int rc = 0, vfe_cmd_type = 0, rdi_mode = 0;
+	unsigned long flags;
 
 	if (!axi_ctrl->share_ctrl->vfebase) {
 		pr_err("%s: base address unmapped\n", __func__);
@@ -5561,11 +5707,42 @@
 		pr_err("%s Invalid/Unsupported AXI configuration %x",
 			__func__, cfgcmd.cmd_type);
 		break;
-	case CMD_AXI_START:
-		axi_start(axi_ctrl);
+	case CMD_AXI_START: {
+		struct msm_camera_vfe_params_t vfe_params;
+		if (copy_from_user(&vfe_params,
+				(void __user *)(vfecmd.value),
+				sizeof(struct msm_camera_vfe_params_t))) {
+				return -EFAULT;
+		}
+		axi_ctrl->share_ctrl->current_mode =
+			vfe_params.operation_mode;
+		spin_lock_irqsave(&axi_ctrl->share_ctrl->abort_lock, flags);
+		axi_ctrl->share_ctrl->skip_abort =
+			vfe_params.skip_abort;
+		spin_unlock_irqrestore(&axi_ctrl->share_ctrl->abort_lock,
+			flags);
+		axi_start(pmctl, axi_ctrl, vfe_params);
+		}
 		break;
-	case CMD_AXI_STOP:
-		axi_stop(axi_ctrl);
+	case CMD_AXI_STOP: {
+		struct msm_camera_vfe_params_t vfe_params;
+		if (copy_from_user(&vfe_params,
+				(void __user *)(vfecmd.value),
+				sizeof(struct msm_camera_vfe_params_t))) {
+				return -EFAULT;
+		}
+		axi_ctrl->share_ctrl->current_mode =
+			vfe_params.operation_mode;
+		spin_lock_irqsave(&axi_ctrl->share_ctrl->abort_lock, flags);
+		axi_ctrl->share_ctrl->skip_abort =
+			vfe_params.skip_abort;
+		spin_unlock_irqrestore(&axi_ctrl->share_ctrl->abort_lock,
+			flags);
+		axi_stop(pmctl, axi_ctrl, vfe_params);
+		}
+		break;
+	case CMD_AXI_RESET:
+		axi_reset(axi_ctrl);
 		break;
 	default:
 		pr_err("%s Unsupported AXI configuration %x ", __func__,
@@ -5579,6 +5756,7 @@
 {
 	struct axi_ctrl_t *axi_ctrl = v4l2_get_subdevdata(sd);
 	uint32_t irqstatus = (uint32_t) arg;
+	unsigned long flags;
 
 	if (!axi_ctrl->share_ctrl->vfebase) {
 		pr_err("%s: base address unmapped\n", __func__);
@@ -5609,7 +5787,8 @@
 
 	/* in snapshot mode if done then send
 	snapshot done message */
-	if (axi_ctrl->share_ctrl->operation_mode ==
+	if (
+		axi_ctrl->share_ctrl->operation_mode ==
 			VFE_OUTPUTS_THUMB_AND_MAIN ||
 		axi_ctrl->share_ctrl->operation_mode ==
 			VFE_OUTPUTS_MAIN_AND_THUMB ||
@@ -5626,6 +5805,17 @@
 				CAMIF_COMMAND_STOP_IMMEDIATELY,
 				axi_ctrl->share_ctrl->vfebase +
 				VFE_CAMIF_COMMAND);
+			spin_lock_irqsave(&axi_ctrl->share_ctrl->abort_lock,
+				flags);
+			if (axi_ctrl->share_ctrl->skip_abort) {
+				spin_unlock_irqrestore(&axi_ctrl->share_ctrl->
+					abort_lock, flags);
+				atomic_set(&axi_ctrl->share_ctrl->
+					handle_axi_irq, 0);
+				axi_disable_irq(axi_ctrl);
+			} else
+				spin_unlock_irqrestore(&axi_ctrl->share_ctrl->
+					abort_lock, flags);
 			vfe32_send_isp_msg(&axi_ctrl->subdev,
 				axi_ctrl->share_ctrl->vfeFrameId,
 				MSG_ID_SNAPSHOT_DONE);
@@ -5704,7 +5894,9 @@
 		rc = 0;
 		break;
 	default:
-		pr_err("%s: command not found\n", __func__);
+		pr_err("%s: command %d not found\n", __func__,
+						_IOC_NR(cmd));
+		break;
 	}
 	return rc;
 }
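
One pattern worth noting from the hunks above: the RDI0/RDI1 reg-update handlers no longer take start_ack_lock around an int8_t flag; they test-and-clear the new rdi0/rdi1_update_ack_pending atomics with atomic_cmpxchg(), which is safe to do directly in interrupt context. A standalone illustration of that idiom follows; the names (ack_pending, request_ack, reg_update_irq) are stand-ins for the driver's fields and vfe32_send_isp_msg(), not driver code.

#include <linux/atomic.h>
#include <linux/printk.h>

/* Illustrative stand-in for rdiN_update_ack_pending. */
static atomic_t ack_pending = ATOMIC_INIT(0);

static void request_ack(void)
{
	atomic_set(&ack_pending, 1);	/* armed from the command path */
}

static void reg_update_irq(void)
{
	/* atomic_cmpxchg() returns the previous value, so only the first
	 * IRQ after request_ack() observes 1, clears it, and sends the
	 * ack; later IRQs see 0 and do nothing. No spinlock is needed. */
	if (atomic_cmpxchg(&ack_pending, 1, 0))
		pr_info("RDI update ack sent\n");	/* stands in for vfe32_send_isp_msg() */
}
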
diff --git a/drivers/media/video/msm/vfe/msm_vfe32.h b/drivers/media/video/msm/vfe/msm_vfe32.h
index 1795aa2..f4b7edb 100644
--- a/drivers/media/video/msm/vfe/msm_vfe32.h
+++ b/drivers/media/video/msm/vfe/msm_vfe32.h
@@ -246,12 +246,13 @@
 
 #define V32_OPERATION_CFG_LEN     44
 
-#define V32_AXI_OUT_OFF           0x00000038
+#define V32_AXI_BUS_CMD_OFF       0x00000038
+#define V32_AXI_OUT_OFF           0x0000003C
 #define V32_AXI_OUT_LEN           240
-#define V32_AXI_CH_INF_LEN        48
 #define V32_AXI_CFG_LEN           47
-#define V32_AXI_BUS_FMT_OFF    1
-#define V32_AXI_BUS_FMT_LEN    4
+#define V32_AXI_BUS_FMT_OFF       1
+#define V32_AXI_BUS_FMT_LEN       4
+#define V32_AXI_BUS_CFG_LEN       16
 
 #define V32_FRAME_SKIP_OFF        0x00000504
 #define V32_FRAME_SKIP_LEN        32
@@ -940,21 +941,37 @@
 	uint32_t register_total;
 
 	atomic_t vstate;
+	atomic_t handle_axi_irq;
 	uint32_t vfeFrameId;
 	uint32_t stats_comp;
 	spinlock_t  stop_flag_lock;
+	spinlock_t  abort_lock;
 	int8_t stop_ack_pending;
 	enum vfe_output_state liveshot_state;
 	uint32_t vfe_capture_count;
 
-	uint16_t operation_mode;     /* streaming or snapshot */
+	uint32_t operation_mode;     /* streaming or snapshot */
+	uint32_t current_mode;
 	struct vfe32_output_path outpath;
 
-	uint32_t ref_count;
+	uint16_t port_info;
+	uint32_t skip_abort;
 	spinlock_t  sd_notify_lock;
 
+	struct completion reset_complete;
+
+	spinlock_t  update_ack_lock;
+	spinlock_t  start_ack_lock;
+
 	struct axi_ctrl_t *axi_ctrl;
 	struct vfe32_ctrl_type *vfe32_ctrl;
+	int8_t start_ack_pending;
+	int8_t update_ack_pending;
+	enum vfe_output_state recording_state;
+
+	atomic_t rdi0_update_ack_pending;
+	atomic_t rdi1_update_ack_pending;
+	atomic_t rdi2_update_ack_pending;
 };
 
 struct axi_ctrl_t {
@@ -975,21 +992,12 @@
 };
 
 struct vfe32_ctrl_type {
-	uint32_t vfeImaskCompositePacked;
-
-	spinlock_t  update_ack_lock;
-	spinlock_t  start_ack_lock;
 	spinlock_t  state_lock;
-	spinlock_t  io_lock;
 	spinlock_t  stats_bufq_lock;
 	uint32_t extlen;
 	void *extdata;
 
-	int8_t start_ack_pending;
-	int8_t update_ack_pending;
-	bool is_reset_blocking;
-	struct completion reset_complete;
-	enum vfe_output_state recording_state;
+	int8_t vfe_sof_count_enable;
 	int8_t update_linear;
 	int8_t update_rolloff;
 	int8_t update_la;
@@ -1001,12 +1009,6 @@
 	uint32_t sync_timer_state;
 	uint32_t sync_timer_number;
 
-	uint32_t output1Pattern;
-	uint32_t output1Period;
-	uint32_t output2Pattern;
-	uint32_t output2Period;
-	uint32_t vfeFrameSkipCount;
-	uint32_t vfeFrameSkipPeriod;
 	struct msm_ver_num_info ver_num;
 	struct vfe_stats_control afbfStatsControl;
 	struct vfe_stats_control awbStatsControl;
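
The header changes above move the ack bookkeeping (start_ack_lock, update_ack_lock, the pending flags, recording_state, reset_complete) out of vfe32_ctrl_type and into the shared control structure, next to the new abort_lock, handle_axi_irq and rdi*_update_ack_pending fields. A pared-down sketch of the resulting layout is shown below; shared_state, vfe_priv and axi_priv are hypothetical names that mirror only the fields relevant to the hand-off, not the real structures.

#include <linux/types.h>
#include <linux/spinlock.h>
#include <linux/atomic.h>

/* Hypothetical miniature of the shared control block: both subdev
 * privates point at the same instance, so a flag armed from one side
 * is the very flag the other side's IRQ path clears. */
struct shared_state {
	spinlock_t start_ack_lock;
	spinlock_t update_ack_lock;
	int8_t start_ack_pending;
	int8_t update_ack_pending;
	atomic_t handle_axi_irq;
	atomic_t rdi0_update_ack_pending;
	atomic_t rdi1_update_ack_pending;
};

struct vfe_priv { struct shared_state *share_ctrl; /* ... */ };
struct axi_priv { struct shared_state *share_ctrl; /* ... */ };

Keeping a single copy is what lets axi_start()/axi_stop() and the VFE IRQ handlers coordinate through the same locks and flags.
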
diff --git a/drivers/media/video/msm/vfe/msm_vfe40.c b/drivers/media/video/msm/vfe/msm_vfe40.c
new file mode 100644
index 0000000..5a1d488
--- /dev/null
+++ b/drivers/media/video/msm/vfe/msm_vfe40.c
@@ -0,0 +1,3699 @@
+/* Copyright (c) 2012, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/uaccess.h>
+#include <linux/interrupt.h>
+#include <linux/slab.h>
+#include <linux/io.h>
+#include <linux/atomic.h>
+#include <linux/regulator/consumer.h>
+#include <linux/clk.h>
+#include <linux/of.h>
+#include <mach/irqs.h>
+#include <mach/camera.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-subdev.h>
+#include <media/msm_isp.h>
+
+#include "msm.h"
+#include "msm_vfe40.h"
+
+struct vfe40_isr_queue_cmd {
+	struct list_head list;
+	uint32_t                           vfeInterruptStatus0;
+	uint32_t                           vfeInterruptStatus1;
+};
+
+static const char * const vfe40_general_cmd[] = {
+	"DUMMY_0",  /* 0 */
+	"SET_CLK",
+	"RESET",
+	"START",
+	"TEST_GEN_START",
+	"OPERATION_CFG",  /* 5 */
+	"AXI_OUT_CFG",
+	"CAMIF_CFG",
+	"AXI_INPUT_CFG",
+	"BLACK_LEVEL_CFG",
+	"ROLL_OFF_CFG",  /* 10 */
+	"DEMUX_CFG",
+	"FOV_CFG",
+	"MAIN_SCALER_CFG",
+	"WB_CFG",
+	"COLOR_COR_CFG", /* 15 */
+	"RGB_G_CFG",
+	"LA_CFG",
+	"CHROMA_EN_CFG",
+	"CHROMA_SUP_CFG",
+	"MCE_CFG", /* 20 */
+	"SK_ENHAN_CFG",
+	"ASF_CFG",
+	"S2Y_CFG",
+	"S2CbCr_CFG",
+	"CHROMA_SUBS_CFG",  /* 25 */
+	"OUT_CLAMP_CFG",
+	"FRAME_SKIP_CFG",
+	"DUMMY_1",
+	"DUMMY_2",
+	"DUMMY_3",  /* 30 */
+	"UPDATE",
+	"BL_LVL_UPDATE",
+	"DEMUX_UPDATE",
+	"FOV_UPDATE",
+	"MAIN_SCALER_UPDATE",  /* 35 */
+	"WB_UPDATE",
+	"COLOR_COR_UPDATE",
+	"RGB_G_UPDATE",
+	"LA_UPDATE",
+	"CHROMA_EN_UPDATE",  /* 40 */
+	"CHROMA_SUP_UPDATE",
+	"MCE_UPDATE",
+	"SK_ENHAN_UPDATE",
+	"S2CbCr_UPDATE",
+	"S2Y_UPDATE",  /* 45 */
+	"ASF_UPDATE",
+	"FRAME_SKIP_UPDATE",
+	"CAMIF_FRAME_UPDATE",
+	"STATS_AF_UPDATE",
+	"STATS_AE_UPDATE",  /* 50 */
+	"STATS_AWB_UPDATE",
+	"STATS_RS_UPDATE",
+	"STATS_CS_UPDATE",
+	"STATS_SKIN_UPDATE",
+	"STATS_IHIST_UPDATE",  /* 55 */
+	"DUMMY_4",
+	"EPOCH1_ACK",
+	"EPOCH2_ACK",
+	"START_RECORDING",
+	"STOP_RECORDING",  /* 60 */
+	"DUMMY_5",
+	"DUMMY_6",
+	"CAPTURE",
+	"DUMMY_7",
+	"STOP",  /* 65 */
+	"GET_HW_VERSION",
+	"GET_FRAME_SKIP_COUNTS",
+	"OUTPUT1_BUFFER_ENQ",
+	"OUTPUT2_BUFFER_ENQ",
+	"OUTPUT3_BUFFER_ENQ",  /* 70 */
+	"JPEG_OUT_BUF_ENQ",
+	"RAW_OUT_BUF_ENQ",
+	"RAW_IN_BUF_ENQ",
+	"STATS_AF_ENQ",
+	"STATS_AE_ENQ",  /* 75 */
+	"STATS_AWB_ENQ",
+	"STATS_RS_ENQ",
+	"STATS_CS_ENQ",
+	"STATS_SKIN_ENQ",
+	"STATS_IHIST_ENQ",  /* 80 */
+	"DUMMY_8",
+	"JPEG_ENC_CFG",
+	"DUMMY_9",
+	"STATS_AF_START",
+	"STATS_AF_STOP",  /* 85 */
+	"STATS_AE_START",
+	"STATS_AE_STOP",
+	"STATS_AWB_START",
+	"STATS_AWB_STOP",
+	"STATS_RS_START",  /* 90 */
+	"STATS_RS_STOP",
+	"STATS_CS_START",
+	"STATS_CS_STOP",
+	"STATS_SKIN_START",
+	"STATS_SKIN_STOP",  /* 95 */
+	"STATS_IHIST_START",
+	"STATS_IHIST_STOP",
+	"DUMMY_10",
+	"SYNC_TIMER_SETTING",
+	"ASYNC_TIMER_SETTING",  /* 100 */
+	"LIVESHOT",
+	"LA_SETUP",
+	"LINEARIZATION_CFG",
+	"DEMOSAICV3",
+	"DEMOSAICV3_ABCC_CFG", /* 105 */
+	"DEMOSAICV3_DBCC_CFG",
+	"DEMOSAICV3_DBPC_CFG",
+	"DEMOSAICV3_ABF_CFG",
+	"DEMOSAICV3_ABCC_UPDATE",
+	"DEMOSAICV3_DBCC_UPDATE", /* 110 */
+	"DEMOSAICV3_DBPC_UPDATE",
+	"XBAR_CFG",
+	"EZTUNE_CFG",
+	"V40_ZSL",
+	"LINEARIZATION_UPDATE", /*115*/
+	"DEMOSAICV3_ABF_UPDATE",
+	"CLF_CFG",
+	"CLF_LUMA_UPDATE",
+	"CLF_CHROMA_UPDATE",
+	"PCA_ROLL_OFF_CFG", /*120*/
+	"PCA_ROLL_OFF_UPDATE",
+	"GET_REG_DUMP",
+	"GET_LINEARIZATON_TABLE",
+	"GET_MESH_ROLLOFF_TABLE",
+	"GET_PCA_ROLLOFF_TABLE", /*125*/
+	"GET_RGB_G_TABLE",
+	"GET_LA_TABLE",
+	"DEMOSAICV3_UPDATE",
+	"ACTIVE_REGION_CONFIG",
+	"COLOR_PROCESSING_CONFIG", /*130*/
+	"STATS_WB_AEC_CONFIG",
+	"STATS_WB_AEC_UPDATE",
+	"Y_GAMMA_CONFIG",
+	"SCALE_OUTPUT1_CONFIG",
+	"SCALE_OUTPUT2_CONFIG", /*135*/
+	"CAPTURE_RAW",
+	"STOP_LIVESHOT",
+	"RECONFIG_VFE",
+	"STATS_REQBUF_CFG",
+	"STATS_ENQUEUEBUF_CFG",/*140*/
+	"STATS_FLUSH_BUFQ_CFG",
+	"FOV_ENC_CFG",
+	"FOV_VIEW_CFG",
+	"FOV_ENC_UPDATE",
+	"FOV_VIEW_UPDATE",/*145*/
+	"SCALER_ENC_CFG",
+	"SCALER_VIEW_CFG",
+	"SCALER_ENC_UPDATE",
+	"SCALER_VIEW_UPDATE",
+	"COLORXFORM_ENC_CFG",/*150*/
+	"COLORXFORM_VIEW_CFG",
+	"COLORXFORM_ENC_UPDATE",
+	"COLORXFORM_VIEW_UPDATE",
+};
+
+static void vfe40_stop(struct vfe40_ctrl_type *vfe40_ctrl)
+{
+	unsigned long flags;
+
+	atomic_set(&vfe40_ctrl->share_ctrl->vstate, 0);
+
+	/* for reset hw modules, and send msg when reset_irq comes.*/
+	spin_lock_irqsave(&vfe40_ctrl->share_ctrl->stop_flag_lock, flags);
+	vfe40_ctrl->share_ctrl->stop_ack_pending = TRUE;
+	spin_unlock_irqrestore(&vfe40_ctrl->share_ctrl->stop_flag_lock, flags);
+
+	/* disable all interrupts.  */
+	msm_camera_io_w(VFE_DISABLE_ALL_IRQS,
+		vfe40_ctrl->share_ctrl->vfebase + VFE_IRQ_MASK_0);
+	msm_camera_io_w(VFE_DISABLE_ALL_IRQS,
+			vfe40_ctrl->share_ctrl->vfebase + VFE_IRQ_MASK_1);
+
+	/* clear all pending interrupts*/
+	msm_camera_io_w(VFE_CLEAR_ALL_IRQ0,
+		vfe40_ctrl->share_ctrl->vfebase + VFE_IRQ_CLEAR_0);
+	msm_camera_io_w(VFE_CLEAR_ALL_IRQ1,
+		vfe40_ctrl->share_ctrl->vfebase + VFE_IRQ_CLEAR_1);
+	/* Ensure the write order while writing
+	to the command register using the barrier */
+	msm_camera_io_w_mb(1,
+		vfe40_ctrl->share_ctrl->vfebase + VFE_IRQ_CMD);
+
+	/* In either continuous or snapshot mode the stop command can be
+	 * issued at any time, so stop CAMIF immediately. */
+	msm_camera_io_w(CAMIF_COMMAND_STOP_IMMEDIATELY,
+		vfe40_ctrl->share_ctrl->vfebase + VFE_CAMIF_COMMAND);
+}
+
+void vfe40_subdev_notify(int id, int path, int image_mode,
+	struct v4l2_subdev *sd, struct vfe_share_ctrl_t *share_ctrl)
+{
+	struct msm_vfe_resp rp;
+	struct msm_frame_info frame_info;
+	unsigned long flags = 0;
+	spin_lock_irqsave(&share_ctrl->sd_notify_lock, flags);
+	CDBG("%s: msgId = %d\n", __func__, id);
+	memset(&rp, 0, sizeof(struct msm_vfe_resp));
+	rp.evt_msg.type   = MSM_CAMERA_MSG;
+	frame_info.image_mode = image_mode;
+	frame_info.path = path;
+	rp.evt_msg.data = &frame_info;
+	rp.type	   = id;
+	v4l2_subdev_notify(sd, NOTIFY_VFE_BUF_EVT, &rp);
+	spin_unlock_irqrestore(&share_ctrl->sd_notify_lock, flags);
+}
+
+static void vfe40_reset_internal_variables(
+	struct vfe40_ctrl_type *vfe40_ctrl)
+{
+	unsigned long flags;
+	vfe40_ctrl->vfeImaskCompositePacked = 0;
+	/* state control variables */
+	vfe40_ctrl->start_ack_pending = FALSE;
+	atomic_set(&vfe40_ctrl->share_ctrl->irq_cnt, 0);
+
+	spin_lock_irqsave(&vfe40_ctrl->share_ctrl->stop_flag_lock, flags);
+	vfe40_ctrl->share_ctrl->stop_ack_pending  = FALSE;
+	spin_unlock_irqrestore(&vfe40_ctrl->share_ctrl->stop_flag_lock, flags);
+
+	vfe40_ctrl->reset_ack_pending  = FALSE;
+
+	spin_lock_irqsave(&vfe40_ctrl->update_ack_lock, flags);
+	vfe40_ctrl->update_ack_pending = FALSE;
+	spin_unlock_irqrestore(&vfe40_ctrl->update_ack_lock, flags);
+
+	vfe40_ctrl->recording_state = VFE_STATE_IDLE;
+	vfe40_ctrl->share_ctrl->liveshot_state = VFE_STATE_IDLE;
+
+	atomic_set(&vfe40_ctrl->share_ctrl->vstate, 0);
+
+	/* 0 for continuous mode, 1 for snapshot mode */
+	vfe40_ctrl->share_ctrl->operation_mode = 0;
+	vfe40_ctrl->share_ctrl->outpath.output_mode = 0;
+	vfe40_ctrl->share_ctrl->vfe_capture_count = 0;
+
+	/* this is an unsigned 32-bit integer. */
+	vfe40_ctrl->share_ctrl->vfeFrameId = 0;
+	/* Stats control variables. */
+	memset(&(vfe40_ctrl->afStatsControl), 0,
+		sizeof(struct vfe_stats_control));
+
+	memset(&(vfe40_ctrl->awbStatsControl), 0,
+		sizeof(struct vfe_stats_control));
+
+	memset(&(vfe40_ctrl->aecStatsControl), 0,
+		sizeof(struct vfe_stats_control));
+
+	memset(&(vfe40_ctrl->ihistStatsControl), 0,
+		sizeof(struct vfe_stats_control));
+
+	memset(&(vfe40_ctrl->rsStatsControl), 0,
+		sizeof(struct vfe_stats_control));
+
+	memset(&(vfe40_ctrl->csStatsControl), 0,
+		sizeof(struct vfe_stats_control));
+
+	vfe40_ctrl->frame_skip_cnt = 31;
+	vfe40_ctrl->frame_skip_pattern = 0xffffffff;
+	vfe40_ctrl->snapshot_frame_cnt = 0;
+}
+
+static void vfe40_reset(struct vfe40_ctrl_type *vfe40_ctrl)
+{
+	vfe40_reset_internal_variables(vfe40_ctrl);
+	/* disable all interrupts.  vfeImaskLocal is also reset to 0
+	* to begin with. */
+	msm_camera_io_w(VFE_DISABLE_ALL_IRQS,
+		vfe40_ctrl->share_ctrl->vfebase + VFE_IRQ_MASK_0);
+
+	msm_camera_io_w(VFE_DISABLE_ALL_IRQS,
+		vfe40_ctrl->share_ctrl->vfebase + VFE_IRQ_MASK_1);
+
+	/* clear all pending interrupts*/
+	msm_camera_io_w(VFE_CLEAR_ALL_IRQS,
+		vfe40_ctrl->share_ctrl->vfebase + VFE_IRQ_CLEAR_0);
+	msm_camera_io_w(VFE_CLEAR_ALL_IRQS,
+		vfe40_ctrl->share_ctrl->vfebase + VFE_IRQ_CLEAR_1);
+
+	/* Ensure the write order while writing
+	to the command register using the barrier */
+	msm_camera_io_w_mb(1, vfe40_ctrl->share_ctrl->vfebase + VFE_IRQ_CMD);
+
+	/* enable reset_ack interrupt.  */
+	msm_camera_io_w(VFE_IMASK_WHILE_STOPPING_1,
+	vfe40_ctrl->share_ctrl->vfebase + VFE_IRQ_MASK_1);
+
+	/* Write to VFE_GLOBAL_RESET_CMD to reset the vfe hardware. Once reset
+	 * is done, a hardware interrupt will be generated.  The VFE ISR processes
+	 * the interrupt to complete the function call.  Note that the reset
+	 * function is synchronous. */
+
+	/* Ensure the write order while writing
+	to the command register using the barrier */
+	msm_camera_io_w_mb(VFE_RESET_UPON_RESET_CMD,
+		vfe40_ctrl->share_ctrl->vfebase + VFE_GLOBAL_RESET);
+
+	msm_camera_io_w(0xAAAAAAAA,
+	vfe40_ctrl->share_ctrl->vfebase + VFE_0_BUS_BDG_QOS_CFG_0);
+	msm_camera_io_w(0xAAAAAAAA,
+	vfe40_ctrl->share_ctrl->vfebase + VFE_0_BUS_BDG_QOS_CFG_1);
+	msm_camera_io_w(0xAAAAAAAA,
+	vfe40_ctrl->share_ctrl->vfebase + VFE_0_BUS_BDG_QOS_CFG_2);
+	msm_camera_io_w(0xAAAAAAAA,
+	vfe40_ctrl->share_ctrl->vfebase + VFE_0_BUS_BDG_QOS_CFG_3);
+	msm_camera_io_w(0xAAAAAAAA,
+	vfe40_ctrl->share_ctrl->vfebase + VFE_0_BUS_BDG_QOS_CFG_4);
+	msm_camera_io_w(0xAAAAAAAA,
+	vfe40_ctrl->share_ctrl->vfebase + VFE_0_BUS_BDG_QOS_CFG_5);
+	msm_camera_io_w(0xAAAAAAAA,
+	vfe40_ctrl->share_ctrl->vfebase + VFE_0_BUS_BDG_QOS_CFG_6);
+	msm_camera_io_w(0x0002AAAA,
+	vfe40_ctrl->share_ctrl->vfebase + VFE_0_BUS_BDG_QOS_CFG_7);
+}
+
+static int vfe40_operation_config(uint32_t *cmd,
+			struct vfe40_ctrl_type *vfe40_ctrl)
+{
+	uint32_t *p = cmd;
+
+	vfe40_ctrl->share_ctrl->operation_mode = *p;
+	vfe40_ctrl->share_ctrl->stats_comp = *(++p);
+	vfe40_ctrl->hfr_mode = *(++p);
+
+	msm_camera_io_w(*(++p),
+		vfe40_ctrl->share_ctrl->vfebase + VFE_CFG);
+	msm_camera_io_w(*(++p),
+		vfe40_ctrl->share_ctrl->vfebase + VFE_MODULE_CFG);
+	msm_camera_io_w(*(++p),
+		vfe40_ctrl->share_ctrl->vfebase + VFE_RDI0_CFG);
+	if (msm_camera_io_r(vfe40_ctrl->share_ctrl->vfebase +
+		V40_GET_HW_VERSION_OFF) ==
+		VFE40_HW_NUMBER) {
+		msm_camera_io_w(*(++p),
+			vfe40_ctrl->share_ctrl->vfebase + VFE_RDI1_CFG);
+		msm_camera_io_w(*(++p),
+			vfe40_ctrl->share_ctrl->vfebase + VFE_RDI2_CFG);
+	} else {
+		++p;
+		++p;
+	}
+	msm_camera_io_w(*(++p),
+		vfe40_ctrl->share_ctrl->vfebase + VFE_REALIGN_BUF);
+	msm_camera_io_w(*(++p),
+		vfe40_ctrl->share_ctrl->vfebase + VFE_CHROMA_UP);
+	msm_camera_io_w(*(++p),
+		vfe40_ctrl->share_ctrl->vfebase + VFE_STATS_CFG);
+	return 0;
+}
+
+static unsigned long vfe40_stats_dqbuf(struct vfe40_ctrl_type *vfe40_ctrl,
+	enum msm_stats_enum_type stats_type)
+{
+	struct msm_stats_meta_buf *buf = NULL;
+	int rc = 0;
+	rc = vfe40_ctrl->stats_ops.dqbuf(
+			vfe40_ctrl->stats_ops.stats_ctrl, stats_type, &buf);
+	if (rc < 0) {
+		pr_err("%s: dq stats buf (type = %d) err = %d",
+			__func__, stats_type, rc);
+		return 0L;
+	}
+	return buf->paddr;
+}
+
+static unsigned long vfe40_stats_flush_enqueue(
+	struct vfe40_ctrl_type *vfe40_ctrl,
+	enum msm_stats_enum_type stats_type)
+{
+	struct msm_stats_bufq *bufq = NULL;
+	struct msm_stats_meta_buf *stats_buf = NULL;
+	int rc = 0;
+	int i;
+
+	/*
+	 * Passing NULL for ion client as the buffers are already
+	 * mapped at this stage, client is not required, flush all
+	 * the buffers, and buffers move to PREPARE state
+	 */
+
+	rc = vfe40_ctrl->stats_ops.bufq_flush(
+			vfe40_ctrl->stats_ops.stats_ctrl, stats_type, NULL);
+	if (rc < 0) {
+		pr_err("%s: flush stats bufq (type = %d) err = %d",
+			__func__, stats_type, rc);
+		return 0L;
+	}
+	/* Queue all the buffers back to QUEUED state */
+	bufq = vfe40_ctrl->stats_ctrl.bufq[stats_type];
+	for (i = 0; i < bufq->num_bufs; i++) {
+		stats_buf = &bufq->bufs[i];
+		rc = vfe40_ctrl->stats_ops.enqueue_buf(
+				vfe40_ctrl->stats_ops.stats_ctrl,
+				&(stats_buf->info), NULL);
+		if (rc < 0) {
+			pr_err("%s: enqueue stats buf (type = %d) err = %d",
+				 __func__, stats_type, rc);
+			return rc;
+		}
+	}
+	return 0L;
+}
+
+static int vfe_stats_awb_buf_init(
+	struct vfe40_ctrl_type *vfe40_ctrl,
+	struct vfe_cmd_stats_buf *in)
+{
+	uint32_t addr;
+	unsigned long flags;
+
+	spin_lock_irqsave(&vfe40_ctrl->stats_bufq_lock, flags);
+	addr = (uint32_t)vfe40_stats_dqbuf(vfe40_ctrl, MSM_STATS_TYPE_AWB);
+	spin_unlock_irqrestore(&vfe40_ctrl->stats_bufq_lock, flags);
+	if (!addr) {
+		pr_err("%s: dq awb ping buf from free buf queue", __func__);
+		return -ENOMEM;
+	}
+	msm_camera_io_w(addr,
+		vfe40_ctrl->share_ctrl->vfebase +
+		VFE_BUS_STATS_AWB_WR_PING_ADDR);
+	spin_lock_irqsave(&vfe40_ctrl->stats_bufq_lock, flags);
+	addr = (uint32_t)vfe40_stats_dqbuf(vfe40_ctrl, MSM_STATS_TYPE_AWB);
+	spin_unlock_irqrestore(&vfe40_ctrl->stats_bufq_lock, flags);
+	if (!addr) {
+		pr_err("%s: dq awb pong buf from free buf queue",
+			__func__);
+		return -ENOMEM;
+	}
+	msm_camera_io_w(addr,
+		vfe40_ctrl->share_ctrl->vfebase +
+		VFE_BUS_STATS_AWB_WR_PONG_ADDR);
+	return 0;
+}
+
+static int vfe_stats_aec_buf_init(
+	struct vfe40_ctrl_type *vfe40_ctrl, struct vfe_cmd_stats_buf *in)
+{
+	uint32_t addr;
+	unsigned long flags;
+
+	spin_lock_irqsave(&vfe40_ctrl->stats_bufq_lock, flags);
+	addr = (uint32_t)vfe40_stats_dqbuf(vfe40_ctrl, MSM_STATS_TYPE_AEC);
+	spin_unlock_irqrestore(&vfe40_ctrl->stats_bufq_lock, flags);
+	if (!addr) {
+		pr_err("%s: dq aec ping buf from free buf queue",
+			__func__);
+		return -ENOMEM;
+	}
+	msm_camera_io_w(addr,
+		vfe40_ctrl->share_ctrl->vfebase +
+		VFE_BUS_STATS_AEC_WR_PING_ADDR);
+	spin_lock_irqsave(&vfe40_ctrl->stats_bufq_lock, flags);
+	addr = (uint32_t)vfe40_stats_dqbuf(vfe40_ctrl, MSM_STATS_TYPE_AEC);
+	spin_unlock_irqrestore(&vfe40_ctrl->stats_bufq_lock, flags);
+	if (!addr) {
+		pr_err("%s: dq aec pong buf from free buf queue",
+			__func__);
+		return -ENOMEM;
+	}
+	msm_camera_io_w(addr,
+		vfe40_ctrl->share_ctrl->vfebase +
+		VFE_BUS_STATS_AEC_WR_PONG_ADDR);
+	return 0;
+}
+
+static int vfe_stats_af_buf_init(
+	struct vfe40_ctrl_type *vfe40_ctrl, struct vfe_cmd_stats_buf *in)
+{
+	uint32_t addr;
+	unsigned long flags;
+	int rc = 0;
+
+	spin_lock_irqsave(&vfe40_ctrl->stats_bufq_lock, flags);
+	rc = vfe40_stats_flush_enqueue(vfe40_ctrl, MSM_STATS_TYPE_AF);
+	if (rc < 0) {
+		pr_err("%s: dq stats buf err = %d",
+			   __func__, rc);
+		spin_unlock_irqrestore(&vfe40_ctrl->stats_bufq_lock, flags);
+		return -EINVAL;
+	}
+	addr = (uint32_t)vfe40_stats_dqbuf(vfe40_ctrl, MSM_STATS_TYPE_AF);
+	spin_unlock_irqrestore(&vfe40_ctrl->stats_bufq_lock, flags);
+	if (!addr) {
+		pr_err("%s: dq af ping buf from free buf queue", __func__);
+		return -ENOMEM;
+	}
+	msm_camera_io_w(addr,
+		vfe40_ctrl->share_ctrl->vfebase +
+		VFE_BUS_STATS_AF_WR_PING_ADDR);
+	spin_lock_irqsave(&vfe40_ctrl->stats_bufq_lock, flags);
+	addr = (uint32_t)vfe40_stats_dqbuf(vfe40_ctrl, MSM_STATS_TYPE_AF);
+	spin_unlock_irqrestore(&vfe40_ctrl->stats_bufq_lock, flags);
+	if (!addr) {
+		pr_err("%s: dq af pong buf from free buf queue", __func__);
+		return -ENOMEM;
+	}
+	msm_camera_io_w(addr,
+		vfe40_ctrl->share_ctrl->vfebase +
+		VFE_BUS_STATS_AF_WR_PONG_ADDR);
+
+	return 0;
+}
+
+static int vfe_stats_ihist_buf_init(
+	struct vfe40_ctrl_type *vfe40_ctrl, struct vfe_cmd_stats_buf *in)
+{
+	uint32_t addr;
+	unsigned long flags;
+
+	spin_lock_irqsave(&vfe40_ctrl->stats_bufq_lock, flags);
+	addr = (uint32_t)vfe40_stats_dqbuf(vfe40_ctrl, MSM_STATS_TYPE_IHIST);
+	spin_unlock_irqrestore(&vfe40_ctrl->stats_bufq_lock, flags);
+	if (!addr) {
+		pr_err("%s: dq ihist ping buf from free buf queue",
+			__func__);
+		return -ENOMEM;
+	}
+	msm_camera_io_w(addr,
+		vfe40_ctrl->share_ctrl->vfebase +
+		VFE_BUS_STATS_HIST_WR_PING_ADDR);
+	spin_lock_irqsave(&vfe40_ctrl->stats_bufq_lock, flags);
+	addr = (uint32_t)vfe40_stats_dqbuf(vfe40_ctrl, MSM_STATS_TYPE_IHIST);
+	spin_unlock_irqrestore(&vfe40_ctrl->stats_bufq_lock, flags);
+	if (!addr) {
+		pr_err("%s: dq ihist pong buf from free buf queue",
+			__func__);
+		return -ENOMEM;
+	}
+	msm_camera_io_w(addr,
+		vfe40_ctrl->share_ctrl->vfebase +
+		VFE_BUS_STATS_HIST_WR_PONG_ADDR);
+
+	return 0;
+}
+
+static int vfe_stats_rs_buf_init(
+	struct vfe40_ctrl_type *vfe40_ctrl, struct vfe_cmd_stats_buf *in)
+{
+	uint32_t addr;
+	unsigned long flags;
+
+	spin_lock_irqsave(&vfe40_ctrl->stats_bufq_lock, flags);
+	addr = (uint32_t)vfe40_stats_dqbuf(vfe40_ctrl, MSM_STATS_TYPE_RS);
+	spin_unlock_irqrestore(&vfe40_ctrl->stats_bufq_lock, flags);
+	if (!addr) {
+		pr_err("%s: dq rs ping buf from free buf queue", __func__);
+		return -ENOMEM;
+	}
+	msm_camera_io_w(addr,
+		vfe40_ctrl->share_ctrl->vfebase +
+		VFE_BUS_STATS_RS_WR_PING_ADDR);
+	spin_lock_irqsave(&vfe40_ctrl->stats_bufq_lock, flags);
+	addr = (uint32_t)vfe40_stats_dqbuf(vfe40_ctrl, MSM_STATS_TYPE_RS);
+	spin_unlock_irqrestore(&vfe40_ctrl->stats_bufq_lock, flags);
+	if (!addr) {
+		pr_err("%s: dq rs pong buf from free buf queue", __func__);
+		return -ENOMEM;
+	}
+	msm_camera_io_w(addr,
+		vfe40_ctrl->share_ctrl->vfebase +
+		VFE_BUS_STATS_RS_WR_PONG_ADDR);
+	return 0;
+}
+
+static int vfe_stats_cs_buf_init(
+	struct vfe40_ctrl_type *vfe40_ctrl, struct vfe_cmd_stats_buf *in)
+{
+	uint32_t addr;
+	unsigned long flags;
+	spin_lock_irqsave(&vfe40_ctrl->stats_bufq_lock, flags);
+	addr = (uint32_t)vfe40_stats_dqbuf(vfe40_ctrl, MSM_STATS_TYPE_CS);
+	spin_unlock_irqrestore(&vfe40_ctrl->stats_bufq_lock, flags);
+	if (!addr) {
+		pr_err("%s: dq cs ping buf from free buf queue", __func__);
+		return -ENOMEM;
+	}
+	msm_camera_io_w(addr,
+		vfe40_ctrl->share_ctrl->vfebase +
+		VFE_BUS_STATS_CS_WR_PING_ADDR);
+	spin_lock_irqsave(&vfe40_ctrl->stats_bufq_lock, flags);
+	addr = (uint32_t)vfe40_stats_dqbuf(vfe40_ctrl, MSM_STATS_TYPE_CS);
+	spin_unlock_irqrestore(&vfe40_ctrl->stats_bufq_lock, flags);
+	if (!addr) {
+		pr_err("%s: dq cs pong buf from free buf queue", __func__);
+		return -ENOMEM;
+	}
+	msm_camera_io_w(addr,
+		vfe40_ctrl->share_ctrl->vfebase +
+		VFE_BUS_STATS_CS_WR_PONG_ADDR);
+	return 0;
+}
+
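+/*
+ * Common start sequence: program the IRQ masks, issue a register update
+ * and the CAMIF command, and mark the VFE as active.
+ */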
+static void vfe40_start_common(struct vfe40_ctrl_type *vfe40_ctrl)
+{
+	uint32_t irq_mask = 0x1E000011;
+	vfe40_ctrl->start_ack_pending = TRUE;
+	CDBG("VFE operation mode = 0x%x, output mode = 0x%x\n",
+		vfe40_ctrl->share_ctrl->operation_mode,
+		vfe40_ctrl->share_ctrl->outpath.output_mode);
+
+	msm_camera_io_w(irq_mask,
+		vfe40_ctrl->share_ctrl->vfebase + VFE_IRQ_MASK_0);
+	msm_camera_io_w(VFE_IMASK_WHILE_STOPPING_1,
+		vfe40_ctrl->share_ctrl->vfebase + VFE_IRQ_MASK_1);
+
+	/* Ensure the write order while writing
+	to the command register using the barrier */
+	msm_camera_io_w_mb(1,
+		vfe40_ctrl->share_ctrl->vfebase + VFE_REG_UPDATE_CMD);
+	msm_camera_io_w_mb(1,
+		vfe40_ctrl->share_ctrl->vfebase + VFE_CAMIF_COMMAND);
+
+	msm_camera_io_dump(vfe40_ctrl->share_ctrl->vfebase,
+		vfe40_ctrl->share_ctrl->register_total*4);
+
+	atomic_set(&vfe40_ctrl->share_ctrl->vstate, 1);
+}
+
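+/*
+ * Request video recording start: bump the bus bandwidth vote and defer
+ * the actual write-master enable to the next reg-update IRQ.
+ */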
+static int vfe40_start_recording(
+	struct msm_cam_media_controller *pmctl,
+	struct vfe40_ctrl_type *vfe40_ctrl)
+{
+	msm_camio_bus_scale_cfg(
+		pmctl->sdata->pdata->cam_bus_scale_table, S_VIDEO);
+	vfe40_ctrl->recording_state = VFE_STATE_START_REQUESTED;
+	msm_camera_io_w_mb(1,
+		vfe40_ctrl->share_ctrl->vfebase + VFE_REG_UPDATE_CMD);
+	return 0;
+}
+
+static int vfe40_stop_recording(
+	struct msm_cam_media_controller *pmctl,
+	struct vfe40_ctrl_type *vfe40_ctrl)
+{
+	vfe40_ctrl->recording_state = VFE_STATE_STOP_REQUESTED;
+	msm_camera_io_w_mb(1,
+		vfe40_ctrl->share_ctrl->vfebase + VFE_REG_UPDATE_CMD);
+	msm_camio_bus_scale_cfg(
+		pmctl->sdata->pdata->cam_bus_scale_table, S_PREVIEW);
+	return 0;
+}
+
+static void vfe40_start_liveshot(
+	struct msm_cam_media_controller *pmctl,
+	struct vfe40_ctrl_type *vfe40_ctrl)
+{
+	/* Hardcode 1 live snapshot for now. */
+	vfe40_ctrl->share_ctrl->outpath.out0.capture_cnt = 1;
+	vfe40_ctrl->share_ctrl->vfe_capture_count =
+		vfe40_ctrl->share_ctrl->outpath.out0.capture_cnt;
+
+	vfe40_ctrl->share_ctrl->liveshot_state = VFE_STATE_START_REQUESTED;
+	msm_camera_io_w_mb(1, vfe40_ctrl->
+		share_ctrl->vfebase + VFE_REG_UPDATE_CMD);
+}
+
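+/*
+ * ZSL start: build the composite IRQ mask for the configured outputs,
+ * enable their AXI write masters, start the VFE and vote for ZSL bus
+ * bandwidth.
+ */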
+static int vfe40_zsl(
+	struct msm_cam_media_controller *pmctl,
+	struct vfe40_ctrl_type *vfe40_ctrl)
+{
+	uint32_t irq_comp_mask = 0;
+	/* capture command is valid for both idle and active states. */
+	irq_comp_mask =
+		msm_camera_io_r(vfe40_ctrl->
+		share_ctrl->vfebase + VFE_IRQ_COMP_MASK);
+
+	CDBG("%s:op mode %d O/P Mode %d\n", __func__,
+		vfe40_ctrl->share_ctrl->operation_mode,
+		vfe40_ctrl->share_ctrl->outpath.output_mode);
+
+	if (vfe40_ctrl->share_ctrl->outpath.output_mode &
+		VFE40_OUTPUT_MODE_PRIMARY) {
+		irq_comp_mask |= (
+			(0x1 << (vfe40_ctrl->share_ctrl->outpath.out0.ch0)) |
+			(0x1 << (vfe40_ctrl->share_ctrl->outpath.out0.ch1)));
+	} else if (vfe40_ctrl->share_ctrl->outpath.output_mode &
+			VFE40_OUTPUT_MODE_PRIMARY_ALL_CHNLS) {
+		irq_comp_mask |= (
+			(0x1 << (vfe40_ctrl->share_ctrl->outpath.out0.ch0)) |
+			(0x1 << (vfe40_ctrl->share_ctrl->outpath.out0.ch1)) |
+			(0x1 << (vfe40_ctrl->share_ctrl->outpath.out0.ch2)));
+	}
+
+	if (vfe40_ctrl->share_ctrl->outpath.output_mode &
+		VFE40_OUTPUT_MODE_SECONDARY) {
+		irq_comp_mask |= ((0x1 << (vfe40_ctrl->
+				share_ctrl->outpath.out1.ch0 + 8)) |
+			(0x1 << (vfe40_ctrl->
+				share_ctrl->outpath.out1.ch1 + 8)));
+	} else if (vfe40_ctrl->share_ctrl->outpath.output_mode &
+			   VFE40_OUTPUT_MODE_SECONDARY_ALL_CHNLS) {
+		irq_comp_mask |= (
+			(0x1 << (vfe40_ctrl->
+				share_ctrl->outpath.out1.ch0 + 8)) |
+			(0x1 << (vfe40_ctrl->
+				share_ctrl->outpath.out1.ch1 + 8)) |
+			(0x1 << (vfe40_ctrl->
+				share_ctrl->outpath.out1.ch2 + 8)));
+	}
+
+	if (vfe40_ctrl->share_ctrl->outpath.output_mode &
+			VFE40_OUTPUT_MODE_PRIMARY) {
+		msm_camera_io_w(1, vfe40_ctrl->share_ctrl->vfebase +
+			vfe40_AXI_WM_CFG[vfe40_ctrl->
+			share_ctrl->outpath.out0.ch0]);
+		msm_camera_io_w(1, vfe40_ctrl->share_ctrl->vfebase +
+			vfe40_AXI_WM_CFG[vfe40_ctrl->
+			share_ctrl->outpath.out0.ch1]);
+	} else if (vfe40_ctrl->share_ctrl->outpath.output_mode &
+				VFE40_OUTPUT_MODE_PRIMARY_ALL_CHNLS) {
+		msm_camera_io_w(1, vfe40_ctrl->share_ctrl->vfebase +
+			vfe40_AXI_WM_CFG[vfe40_ctrl->
+			share_ctrl->outpath.out0.ch0]);
+		msm_camera_io_w(1, vfe40_ctrl->share_ctrl->vfebase +
+			vfe40_AXI_WM_CFG[vfe40_ctrl->
+			share_ctrl->outpath.out0.ch1]);
+		msm_camera_io_w(1, vfe40_ctrl->share_ctrl->vfebase +
+			vfe40_AXI_WM_CFG[vfe40_ctrl->
+			share_ctrl->outpath.out0.ch2]);
+	}
+
+	if (vfe40_ctrl->share_ctrl->outpath.output_mode &
+			VFE40_OUTPUT_MODE_SECONDARY) {
+		msm_camera_io_w(1, vfe40_ctrl->share_ctrl->vfebase +
+			vfe40_AXI_WM_CFG[vfe40_ctrl->
+			share_ctrl->outpath.out1.ch0]);
+		msm_camera_io_w(1, vfe40_ctrl->share_ctrl->vfebase +
+			vfe40_AXI_WM_CFG[vfe40_ctrl->
+			share_ctrl->outpath.out1.ch1]);
+	} else if (vfe40_ctrl->share_ctrl->outpath.output_mode &
+				VFE40_OUTPUT_MODE_SECONDARY_ALL_CHNLS) {
+		msm_camera_io_w(1, vfe40_ctrl->share_ctrl->vfebase +
+			vfe40_AXI_WM_CFG[vfe40_ctrl->
+			share_ctrl->outpath.out1.ch0]);
+		msm_camera_io_w(1, vfe40_ctrl->share_ctrl->vfebase +
+			vfe40_AXI_WM_CFG[vfe40_ctrl->
+			share_ctrl->outpath.out1.ch1]);
+		msm_camera_io_w(1, vfe40_ctrl->share_ctrl->vfebase +
+			vfe40_AXI_WM_CFG[vfe40_ctrl->
+			share_ctrl->outpath.out1.ch2]);
+	}
+
+	msm_camera_io_w(irq_comp_mask,
+		vfe40_ctrl->share_ctrl->vfebase + VFE_IRQ_COMP_MASK);
+	vfe40_start_common(vfe40_ctrl);
+	msm_camio_bus_scale_cfg(
+		pmctl->sdata->pdata->cam_bus_scale_table, S_ZSL);
+
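+	/* for debug */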
+	msm_camera_io_w(1, vfe40_ctrl->share_ctrl->vfebase + 0x18C);
+	msm_camera_io_w(1, vfe40_ctrl->share_ctrl->vfebase + 0x188);
+	return 0;
+}
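+
+/*
+ * Raw capture: program the requested frame count, enable the primary
+ * output's first write master and its IRQ bit, then start the VFE.
+ */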
+static int vfe40_capture_raw(
+	struct msm_cam_media_controller *pmctl,
+	struct vfe40_ctrl_type *vfe40_ctrl,
+	uint32_t num_frames_capture)
+{
+	uint32_t irq_comp_mask = 0;
+
+	vfe40_ctrl->share_ctrl->outpath.out0.capture_cnt = num_frames_capture;
+	vfe40_ctrl->share_ctrl->vfe_capture_count = num_frames_capture;
+
+	irq_comp_mask =
+		msm_camera_io_r(
+			vfe40_ctrl->share_ctrl->vfebase + VFE_IRQ_COMP_MASK);
+
+	if (vfe40_ctrl->share_ctrl->outpath.output_mode &
+		VFE40_OUTPUT_MODE_PRIMARY) {
+		irq_comp_mask |=
+			(0x1 << (vfe40_ctrl->share_ctrl->outpath.out0.ch0));
+		msm_camera_io_w(1, vfe40_ctrl->share_ctrl->vfebase +
+			vfe40_AXI_WM_CFG[vfe40_ctrl->
+			share_ctrl->outpath.out0.ch0]);
+	}
+
+	msm_camera_io_w(irq_comp_mask,
+		vfe40_ctrl->share_ctrl->vfebase + VFE_IRQ_COMP_MASK);
+	msm_camio_bus_scale_cfg(
+		pmctl->sdata->pdata->cam_bus_scale_table, S_CAPTURE);
+	vfe40_start_common(vfe40_ctrl);
+	return 0;
+}
+
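+/*
+ * Snapshot capture: set the capture counts, program the composite IRQ
+ * mask, enable the primary/secondary write masters and start the VFE.
+ */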
+static int vfe40_capture(
+	struct msm_cam_media_controller *pmctl,
+	uint32_t num_frames_capture,
+	struct vfe40_ctrl_type *vfe40_ctrl)
+{
+	uint32_t irq_comp_mask = 0;
+
+	/* capture command is valid for both idle and active states. */
+	vfe40_ctrl->share_ctrl->outpath.out1.capture_cnt = num_frames_capture;
+	if (vfe40_ctrl->share_ctrl->operation_mode ==
+			VFE_OUTPUTS_MAIN_AND_THUMB ||
+		vfe40_ctrl->share_ctrl->operation_mode ==
+			VFE_OUTPUTS_THUMB_AND_MAIN ||
+		vfe40_ctrl->share_ctrl->operation_mode ==
+			VFE_OUTPUTS_JPEG_AND_THUMB ||
+		vfe40_ctrl->share_ctrl->operation_mode ==
+			VFE_OUTPUTS_THUMB_AND_JPEG) {
+		vfe40_ctrl->share_ctrl->outpath.out0.capture_cnt =
+			num_frames_capture;
+	}
+
+	vfe40_ctrl->share_ctrl->vfe_capture_count = num_frames_capture;
+	irq_comp_mask = msm_camera_io_r(
+			vfe40_ctrl->share_ctrl->vfebase + VFE_IRQ_COMP_MASK);
+
+	if (vfe40_ctrl->share_ctrl->operation_mode ==
+			VFE_OUTPUTS_MAIN_AND_THUMB ||
+		vfe40_ctrl->share_ctrl->operation_mode ==
+			VFE_OUTPUTS_JPEG_AND_THUMB ||
+		vfe40_ctrl->share_ctrl->operation_mode ==
+			VFE_OUTPUTS_THUMB_AND_MAIN) {
+		if (vfe40_ctrl->share_ctrl->outpath.output_mode &
+			VFE40_OUTPUT_MODE_PRIMARY) {
+			irq_comp_mask |= (0x1 << vfe40_ctrl->
+				share_ctrl->outpath.out0.ch0 |
+				0x1 << vfe40_ctrl->
+				share_ctrl->outpath.out0.ch1);
+		}
+		if (vfe40_ctrl->share_ctrl->outpath.output_mode &
+			VFE40_OUTPUT_MODE_SECONDARY) {
+			irq_comp_mask |=
+				(0x1 << (vfe40_ctrl->
+					share_ctrl->outpath.out1.ch0 + 8) |
+				0x1 << (vfe40_ctrl->
+					share_ctrl->outpath.out1.ch1 + 8));
+		}
+		if (vfe40_ctrl->share_ctrl->outpath.output_mode &
+			VFE40_OUTPUT_MODE_PRIMARY) {
+			msm_camera_io_w(1, vfe40_ctrl->share_ctrl->vfebase +
+				vfe40_AXI_WM_CFG[vfe40_ctrl->
+				share_ctrl->outpath.out0.ch0]);
+			msm_camera_io_w(1, vfe40_ctrl->share_ctrl->vfebase +
+				vfe40_AXI_WM_CFG[vfe40_ctrl->
+				share_ctrl->outpath.out0.ch1]);
+		}
+		if (vfe40_ctrl->share_ctrl->outpath.output_mode &
+			VFE40_OUTPUT_MODE_SECONDARY) {
+			msm_camera_io_w(1, vfe40_ctrl->share_ctrl->vfebase +
+				vfe40_AXI_WM_CFG[vfe40_ctrl->
+				share_ctrl->outpath.out1.ch0]);
+			msm_camera_io_w(1, vfe40_ctrl->share_ctrl->vfebase +
+				vfe40_AXI_WM_CFG[vfe40_ctrl->
+				share_ctrl->outpath.out1.ch1]);
+		}
+	}
+
+	vfe40_ctrl->share_ctrl->vfe_capture_count = num_frames_capture;
+
+	msm_camera_io_w(irq_comp_mask,
+		vfe40_ctrl->share_ctrl->vfebase + VFE_IRQ_COMP_MASK);
+	msm_camera_io_r(vfe40_ctrl->share_ctrl->vfebase + VFE_IRQ_COMP_MASK);
+	msm_camio_bus_scale_cfg(
+		pmctl->sdata->pdata->cam_bus_scale_table, S_CAPTURE);
+
+	vfe40_start_common(vfe40_ctrl);
+	/* for debug */
+	msm_camera_io_w(1, vfe40_ctrl->share_ctrl->vfebase + 0x18C);
+	msm_camera_io_w(1, vfe40_ctrl->share_ctrl->vfebase + 0x188);
+	return 0;
+}
+
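+/*
+ * Preview/video start: build the composite IRQ mask for the configured
+ * outputs and run the common start sequence.
+ */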
+static int vfe40_start(
+	struct msm_cam_media_controller *pmctl,
+	struct vfe40_ctrl_type *vfe40_ctrl)
+{
+	uint32_t irq_comp_mask = 0;
+	irq_comp_mask =
+		msm_camera_io_r(vfe40_ctrl->share_ctrl->vfebase +
+			VFE_IRQ_COMP_MASK);
+
+	if (vfe40_ctrl->share_ctrl->outpath.output_mode &
+			VFE40_OUTPUT_MODE_PRIMARY) {
+		irq_comp_mask |= (
+			0x1 << vfe40_ctrl->share_ctrl->outpath.out0.ch0 |
+			0x1 << vfe40_ctrl->share_ctrl->outpath.out0.ch1);
+	} else if (vfe40_ctrl->share_ctrl->outpath.output_mode &
+			   VFE40_OUTPUT_MODE_PRIMARY_ALL_CHNLS) {
+		irq_comp_mask |= (
+			0x1 << vfe40_ctrl->share_ctrl->outpath.out0.ch0 |
+			0x1 << vfe40_ctrl->share_ctrl->outpath.out0.ch1 |
+			0x1 << vfe40_ctrl->share_ctrl->outpath.out0.ch2);
+	}
+	if (vfe40_ctrl->share_ctrl->outpath.output_mode &
+			VFE40_OUTPUT_MODE_SECONDARY) {
+		irq_comp_mask |= (
+			0x1 << (vfe40_ctrl->share_ctrl->outpath.out1.ch0 + 8) |
+			0x1 << (vfe40_ctrl->share_ctrl->outpath.out1.ch1 + 8));
+	} else if (vfe40_ctrl->share_ctrl->outpath.output_mode &
+			VFE40_OUTPUT_MODE_SECONDARY_ALL_CHNLS) {
+		irq_comp_mask |= (
+			0x1 << (vfe40_ctrl->share_ctrl->outpath.out1.ch0 + 8) |
+			0x1 << (vfe40_ctrl->share_ctrl->outpath.out1.ch1 + 8) |
+			0x1 << (vfe40_ctrl->share_ctrl->outpath.out1.ch2 + 8));
+	}
+	msm_camera_io_w(irq_comp_mask,
+		vfe40_ctrl->share_ctrl->vfebase + VFE_IRQ_COMP_MASK);
+
+	/*
+	msm_camio_bus_scale_cfg(
+		pmctl->sdata->pdata->cam_bus_scale_table, S_PREVIEW);*/
+	vfe40_start_common(vfe40_ctrl);
+	return 0;
+}
+
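+/*
+ * Apply any pending linearization/LA/gamma bank switches, then request a
+ * register update; the ack is sent from the reg-update IRQ handler.
+ */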
+static void vfe40_update(struct vfe40_ctrl_type *vfe40_ctrl)
+{
+	unsigned long flags;
+	uint32_t value = 0;
+	if (vfe40_ctrl->update_linear) {
+		if (!msm_camera_io_r(
+			vfe40_ctrl->share_ctrl->vfebase +
+			V40_LINEARIZATION_OFF1))
+			msm_camera_io_w(1,
+				vfe40_ctrl->share_ctrl->vfebase +
+				V40_LINEARIZATION_OFF1);
+		else
+			msm_camera_io_w(0,
+				vfe40_ctrl->share_ctrl->vfebase +
+				V40_LINEARIZATION_OFF1);
+		vfe40_ctrl->update_linear = false;
+	}
+
+	if (vfe40_ctrl->update_la) {
+		if (!msm_camera_io_r(
+			vfe40_ctrl->share_ctrl->vfebase + V40_LA_OFF))
+			msm_camera_io_w(1,
+				vfe40_ctrl->share_ctrl->vfebase + V40_LA_OFF);
+		else
+			msm_camera_io_w(0,
+				vfe40_ctrl->share_ctrl->vfebase + V40_LA_OFF);
+		vfe40_ctrl->update_la = false;
+	}
+
+	if (vfe40_ctrl->update_gamma) {
+		value = msm_camera_io_r(
+			vfe40_ctrl->share_ctrl->vfebase + V40_RGB_G_OFF);
+		value ^= V40_GAMMA_LUT_BANK_SEL_MASK;
+		msm_camera_io_w(value,
+			vfe40_ctrl->share_ctrl->vfebase + V40_RGB_G_OFF);
+		vfe40_ctrl->update_gamma = false;
+	}
+
+	spin_lock_irqsave(&vfe40_ctrl->update_ack_lock, flags);
+	vfe40_ctrl->update_ack_pending = TRUE;
+	spin_unlock_irqrestore(&vfe40_ctrl->update_ack_lock, flags);
+	/* Ensure the write order while writing
+	to the command register using the barrier */
+	msm_camera_io_w_mb(1,
+		vfe40_ctrl->share_ctrl->vfebase + VFE_REG_UPDATE_CMD);
+	return;
+}
+
+static void vfe40_sync_timer_stop(struct vfe40_ctrl_type *vfe40_ctrl)
+{
+	uint32_t value = 0;
+	vfe40_ctrl->sync_timer_state = 0;
+	if (vfe40_ctrl->sync_timer_number == 0)
+		value = 0x10000;
+	else if (vfe40_ctrl->sync_timer_number == 1)
+		value = 0x20000;
+	else if (vfe40_ctrl->sync_timer_number == 2)
+		value = 0x40000;
+
+	/* Timer Stop */
+	msm_camera_io_w(value,
+		vfe40_ctrl->share_ctrl->vfebase + V40_SYNC_TIMER_OFF);
+}
+
+static void vfe40_sync_timer_start(
+	const uint32_t *tbl,
+	struct vfe40_ctrl_type *vfe40_ctrl)
+{
+	/* Timer start bit; shifted by the sync timer number below. */
+	uint32_t value = 1;
+	uint32_t val;
+
+	vfe40_ctrl->sync_timer_state = *tbl++;
+	vfe40_ctrl->sync_timer_repeat_count = *tbl++;
+	vfe40_ctrl->sync_timer_number = *tbl++;
+	CDBG("%s timer_state %d, repeat_cnt %d timer number %d\n",
+		 __func__, vfe40_ctrl->sync_timer_state,
+		 vfe40_ctrl->sync_timer_repeat_count,
+		 vfe40_ctrl->sync_timer_number);
+
+	if (vfe40_ctrl->sync_timer_state) { /* Start Timer */
+		value = value << vfe40_ctrl->sync_timer_number;
+	} else { /* Timer state is stop; nothing to start */
+		CDBG("%s: sync timer not started\n", __func__);
+		return;
+	}
+
+	/* Timer Start */
+	msm_camera_io_w(value,
+		vfe40_ctrl->share_ctrl->vfebase + V40_SYNC_TIMER_OFF);
+	/* Sync Timer Line Start */
+	value = *tbl++;
+	msm_camera_io_w(value,
+		vfe40_ctrl->share_ctrl->vfebase + V40_SYNC_TIMER_OFF +
+		4 + ((vfe40_ctrl->sync_timer_number) * 12));
+	/* Sync Timer Pixel Start */
+	value = *tbl++;
+	msm_camera_io_w(value,
+			vfe40_ctrl->share_ctrl->vfebase + V40_SYNC_TIMER_OFF +
+			 8 + ((vfe40_ctrl->sync_timer_number) * 12));
+	/* Sync Timer Pixel Duration */
+	value = *tbl++;
+	val = vfe40_ctrl->share_ctrl->vfe_clk_rate / 10000;
+	val = 10000000 / val;
+	val = value * 10000 / val;
+	CDBG("%s: Pixel Clk Cycles!!! %d\n", __func__, val);
+	msm_camera_io_w(val,
+		vfe40_ctrl->share_ctrl->vfebase + V40_SYNC_TIMER_OFF +
+		12 + ((vfe40_ctrl->sync_timer_number) * 12));
+	/* Timer0 Active High/LOW */
+	value = *tbl++;
+	msm_camera_io_w(value,
+		vfe40_ctrl->share_ctrl->vfebase + V40_SYNC_TIMER_POLARITY_OFF);
+	/* Selects sync timer 0 output to drive onto timer1 port */
+	value = 0;
+	msm_camera_io_w(value,
+		vfe40_ctrl->share_ctrl->vfebase + V40_TIMER_SELECT_OFF);
+}
+
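+/*
+ * Select the requested DMI RAM bank for auto-incremented access and reset
+ * the DMI address to offset 0.
+ */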
+static void vfe40_program_dmi_cfg(
+	enum VFE40_DMI_RAM_SEL bankSel,
+	struct vfe40_ctrl_type *vfe40_ctrl)
+{
+	/* set bit 8 for auto increment. */
+	uint32_t value = VFE_DMI_CFG_DEFAULT;
+	value += (uint32_t)bankSel;
+	CDBG("%s: banksel = %d\n", __func__, bankSel);
+
+	msm_camera_io_w(value, vfe40_ctrl->share_ctrl->vfebase + VFE_DMI_CFG);
+	/* by default, always starts with offset 0.*/
+	msm_camera_io_w(0, vfe40_ctrl->share_ctrl->vfebase + VFE_DMI_ADDR);
+}
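+
+/*
+ * Write a gamma LUT: each 32-bit table word packs two 16-bit entries,
+ * which are unpacked and written to the selected DMI bank in order.
+ */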
+static void vfe40_write_gamma_cfg(
+	enum VFE40_DMI_RAM_SEL channel_sel,
+	const uint32_t *tbl,
+	struct vfe40_ctrl_type *vfe40_ctrl)
+{
+	int i;
+	uint32_t value, value1, value2;
+	vfe40_program_dmi_cfg(channel_sel, vfe40_ctrl);
+	for (i = 0 ; i < (VFE40_GAMMA_NUM_ENTRIES/2) ; i++) {
+		value = *tbl++;
+		value1 = value & 0x0000FFFF;
+		value2 = (value & 0xFFFF0000)>>16;
+		msm_camera_io_w((value1),
+			vfe40_ctrl->share_ctrl->vfebase + VFE_DMI_DATA_LO);
+		msm_camera_io_w((value2),
+			vfe40_ctrl->share_ctrl->vfebase + VFE_DMI_DATA_LO);
+	}
+	vfe40_program_dmi_cfg(NO_MEM_SELECTED, vfe40_ctrl);
+}
+
+static void vfe40_read_gamma_cfg(
+	enum VFE40_DMI_RAM_SEL channel_sel,
+	uint32_t *tbl,
+	struct vfe40_ctrl_type *vfe40_ctrl)
+{
+	int i;
+	vfe40_program_dmi_cfg(channel_sel, vfe40_ctrl);
+	CDBG("%s: Gamma table channel: %d\n", __func__, channel_sel);
+	for (i = 0 ; i < VFE40_GAMMA_NUM_ENTRIES ; i++) {
+		*tbl = msm_camera_io_r(
+			vfe40_ctrl->share_ctrl->vfebase + VFE_DMI_DATA_LO);
+		CDBG("%s: %08x\n", __func__, *tbl);
+		tbl++;
+	}
+	vfe40_program_dmi_cfg(NO_MEM_SELECTED, vfe40_ctrl);
+}
+
+static void vfe40_write_la_cfg(
+	enum VFE40_DMI_RAM_SEL channel_sel,
+	const uint32_t *tbl,
+	struct vfe40_ctrl_type *vfe40_ctrl)
+{
+	uint32_t i;
+	uint32_t value, value1, value2;
+
+	vfe40_program_dmi_cfg(channel_sel, vfe40_ctrl);
+	for (i = 0 ; i < (VFE40_LA_TABLE_LENGTH/2) ; i++) {
+		value = *tbl++;
+		value1 = value & 0x0000FFFF;
+		value2 = (value & 0xFFFF0000)>>16;
+		msm_camera_io_w((value1),
+			vfe40_ctrl->share_ctrl->vfebase + VFE_DMI_DATA_LO);
+		msm_camera_io_w((value2),
+			vfe40_ctrl->share_ctrl->vfebase + VFE_DMI_DATA_LO);
+	}
+	vfe40_program_dmi_cfg(NO_MEM_SELECTED, vfe40_ctrl);
+}
+
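+/*
+ * Map a VFE output path to its output channel descriptor (out0 for
+ * primary, out1 for secondary).
+ */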
+struct vfe40_output_ch *vfe40_get_ch(
+	int path, struct vfe_share_ctrl_t *share_ctrl)
+{
+	struct vfe40_output_ch *ch = NULL;
+
+	if (path == VFE_MSG_OUTPUT_PRIMARY)
+		ch = &share_ctrl->outpath.out0;
+	else if (path == VFE_MSG_OUTPUT_SECONDARY)
+		ch = &share_ctrl->outpath.out1;
+	else
+		pr_err("%s: Invalid path %d\n", __func__,
+			path);
+
+	BUG_ON(ch == NULL);
+	return ch;
+}
+
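+/*
+ * Request ping/pong buffers from the upper layer via vfe40_subdev_notify()
+ * and program their plane addresses into the channel's write masters.
+ * Fails with -EINVAL if either address is missing.
+ */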
+static int vfe40_configure_pingpong_buffers(
+	int id, int path, struct vfe40_ctrl_type *vfe40_ctrl)
+{
+	struct vfe40_output_ch *outch = NULL;
+	int rc = 0;
+	uint32_t image_mode = 0;
+	if (path == VFE_MSG_OUTPUT_PRIMARY)
+		image_mode = vfe40_ctrl->share_ctrl->outpath.out0.image_mode;
+	else
+		image_mode = vfe40_ctrl->share_ctrl->outpath.out1.image_mode;
+
+	vfe40_subdev_notify(id, path, image_mode,
+		&vfe40_ctrl->subdev, vfe40_ctrl->share_ctrl);
+	outch = vfe40_get_ch(path, vfe40_ctrl->share_ctrl);
+	if (outch->ping.ch_paddr[0] && outch->pong.ch_paddr[0]) {
+		/* Configure Preview Ping Pong */
+		CDBG("%s Configure ping/pong address for %d",
+			__func__, path);
+		vfe40_put_ch_ping_addr(
+			vfe40_ctrl->share_ctrl->vfebase, outch->ch0,
+			outch->ping.ch_paddr[0]);
+		vfe40_put_ch_pong_addr(
+			vfe40_ctrl->share_ctrl->vfebase, outch->ch0,
+			outch->pong.ch_paddr[0]);
+
+		if (vfe40_ctrl->share_ctrl->operation_mode !=
+			VFE_OUTPUTS_RAW) {
+			vfe40_put_ch_ping_addr(
+				vfe40_ctrl->share_ctrl->vfebase, outch->ch1,
+				outch->ping.ch_paddr[1]);
+			vfe40_put_ch_pong_addr(
+				vfe40_ctrl->share_ctrl->vfebase, outch->ch1,
+				outch->pong.ch_paddr[1]);
+		}
+
+		if (outch->ping.num_planes > 2)
+			vfe40_put_ch_ping_addr(
+				vfe40_ctrl->share_ctrl->vfebase, outch->ch2,
+				outch->ping.ch_paddr[2]);
+		if (outch->pong.num_planes > 2)
+			vfe40_put_ch_pong_addr(
+				vfe40_ctrl->share_ctrl->vfebase, outch->ch2,
+				outch->pong.ch_paddr[2]);
+
+		/* avoid stale info */
+		memset(&outch->ping, 0, sizeof(struct msm_free_buf));
+		memset(&outch->pong, 0, sizeof(struct msm_free_buf));
+	} else {
+		pr_err("%s ping/pong addr is null!!", __func__);
+		rc = -EINVAL;
+	}
+	return rc;
+}
+
+static void vfe40_write_linear_cfg(
+	enum VFE40_DMI_RAM_SEL channel_sel,
+	const uint32_t *tbl, struct vfe40_ctrl_type *vfe40_ctrl)
+{
+	uint32_t i;
+
+	vfe40_program_dmi_cfg(channel_sel, vfe40_ctrl);
+	/* for loop for configuring LUT. */
+	for (i = 0 ; i < VFE40_LINEARIZATON_TABLE_LENGTH ; i++) {
+		msm_camera_io_w(*tbl,
+			vfe40_ctrl->share_ctrl->vfebase + VFE_DMI_DATA_LO);
+		tbl++;
+	}
+	CDBG("done writing to linearization table\n");
+	vfe40_program_dmi_cfg(NO_MEM_SELECTED, vfe40_ctrl);
+}
+
+void vfe40_send_isp_msg(
+	struct v4l2_subdev *sd,
+	uint32_t vfeFrameId,
+	uint32_t isp_msg_id)
+{
+	struct isp_msg_event isp_msg_evt;
+
+	isp_msg_evt.msg_id = isp_msg_id;
+	isp_msg_evt.sof_count = vfeFrameId;
+	v4l2_subdev_notify(sd,
+			NOTIFY_ISP_MSG_EVT,
+			(void *)&isp_msg_evt);
+}
+
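+/*
+ * Top-level handler for general ISP commands: copy the command payload
+ * from user space and program the corresponding VFE registers or DMI
+ * tables.
+ */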
+static int vfe40_proc_general(
+	struct msm_cam_media_controller *pmctl,
+	struct msm_isp_cmd *cmd,
+	struct vfe40_ctrl_type *vfe40_ctrl)
+{
+	int i, rc = 0;
+	uint32_t old_val = 0, new_val = 0;
+	uint32_t *cmdp = NULL;
+	uint32_t *cmdp_local = NULL;
+	uint32_t snapshot_cnt = 0;
+	uint32_t temp1 = 0, temp2 = 0;
+
+	CDBG("vfe40_proc_general: cmdID = %s, length = %d\n",
+		vfe40_general_cmd[cmd->id], cmd->length);
+	switch (cmd->id) {
+	case VFE_CMD_RESET:
+		CDBG("vfe40_proc_general: cmdID = %s\n",
+			vfe40_general_cmd[cmd->id]);
+		vfe40_reset(vfe40_ctrl);
+		break;
+	case VFE_CMD_START:
+		CDBG("vfe40_proc_general: cmdID = %s\n",
+			vfe40_general_cmd[cmd->id]);
+		if ((vfe40_ctrl->share_ctrl->operation_mode ==
+				VFE_OUTPUTS_PREVIEW_AND_VIDEO) ||
+				(vfe40_ctrl->share_ctrl->operation_mode ==
+				VFE_OUTPUTS_PREVIEW))
+			/* Configure primary channel */
+			rc = vfe40_configure_pingpong_buffers(
+				VFE_MSG_START, VFE_MSG_OUTPUT_PRIMARY,
+				vfe40_ctrl);
+		else
+			/* Configure secondary channel */
+			rc = vfe40_configure_pingpong_buffers(
+				VFE_MSG_START, VFE_MSG_OUTPUT_SECONDARY,
+				vfe40_ctrl);
+		if (rc < 0) {
+			pr_err(
+				"%s error configuring pingpong buffers for preview",
+				__func__);
+			rc = -EINVAL;
+			goto proc_general_done;
+		}
+
+		rc = vfe40_start(pmctl, vfe40_ctrl);
+		break;
+	case VFE_CMD_UPDATE:
+		vfe40_update(vfe40_ctrl);
+		break;
+	case VFE_CMD_CAPTURE_RAW:
+		CDBG("%s: cmdID = VFE_CMD_CAPTURE_RAW\n", __func__);
+		if (copy_from_user(&snapshot_cnt, (void __user *)(cmd->value),
+				sizeof(uint32_t))) {
+			rc = -EFAULT;
+			goto proc_general_done;
+		}
+		rc = vfe40_configure_pingpong_buffers(
+			VFE_MSG_CAPTURE, VFE_MSG_OUTPUT_PRIMARY,
+			vfe40_ctrl);
+		if (rc < 0) {
+			pr_err(
+				"%s error configuring pingpong buffers for snapshot",
+				__func__);
+			rc = -EINVAL;
+			goto proc_general_done;
+		}
+		rc = vfe40_capture_raw(pmctl, vfe40_ctrl, snapshot_cnt);
+		break;
+	case VFE_CMD_CAPTURE:
+		if (copy_from_user(&snapshot_cnt, (void __user *)(cmd->value),
+				sizeof(uint32_t))) {
+			rc = -EFAULT;
+			goto proc_general_done;
+		}
+
+		if (vfe40_ctrl->share_ctrl->operation_mode ==
+			VFE_OUTPUTS_JPEG_AND_THUMB ||
+		vfe40_ctrl->share_ctrl->operation_mode ==
+			VFE_OUTPUTS_THUMB_AND_JPEG) {
+			if (snapshot_cnt != 1) {
+				pr_err("only 1 inline snapshot is supported\n");
+				rc = -EINVAL;
+				goto proc_general_done;
+			}
+			/* Configure primary channel for JPEG */
+			rc = vfe40_configure_pingpong_buffers(
+				VFE_MSG_JPEG_CAPTURE,
+				VFE_MSG_OUTPUT_PRIMARY,
+				vfe40_ctrl);
+		} else {
+			/* Configure primary channel */
+			rc = vfe40_configure_pingpong_buffers(
+				VFE_MSG_CAPTURE,
+				VFE_MSG_OUTPUT_PRIMARY,
+				vfe40_ctrl);
+		}
+		if (rc < 0) {
+			pr_err(
+			"%s error configuring pingpong buffers for primary output",
+			__func__);
+			rc = -EINVAL;
+			goto proc_general_done;
+		}
+		/* Configure secondary channel */
+		rc = vfe40_configure_pingpong_buffers(
+				VFE_MSG_CAPTURE, VFE_MSG_OUTPUT_SECONDARY,
+				vfe40_ctrl);
+		if (rc < 0) {
+			pr_err(
+			"%s error configuring pingpong buffers for secondary output",
+			__func__);
+			rc = -EINVAL;
+			goto proc_general_done;
+		}
+		rc = vfe40_capture(pmctl, snapshot_cnt, vfe40_ctrl);
+		break;
+	case VFE_CMD_START_RECORDING:
+		CDBG("vfe40_proc_general: cmdID = %s\n",
+			vfe40_general_cmd[cmd->id]);
+		if (vfe40_ctrl->share_ctrl->operation_mode ==
+			VFE_OUTPUTS_PREVIEW_AND_VIDEO)
+			rc = vfe40_configure_pingpong_buffers(
+				VFE_MSG_START_RECORDING,
+				VFE_MSG_OUTPUT_SECONDARY,
+				vfe40_ctrl);
+		else if (vfe40_ctrl->share_ctrl->operation_mode ==
+			VFE_OUTPUTS_VIDEO_AND_PREVIEW)
+			rc = vfe40_configure_pingpong_buffers(
+				VFE_MSG_START_RECORDING,
+				VFE_MSG_OUTPUT_PRIMARY,
+				vfe40_ctrl);
+		if (rc < 0) {
+			pr_err(
+				"%s error configuring pingpong buffers for video\n",
+				__func__);
+			rc = -EINVAL;
+			goto proc_general_done;
+		}
+		rc = vfe40_start_recording(pmctl, vfe40_ctrl);
+		break;
+	case VFE_CMD_STOP_RECORDING:
+		CDBG("vfe40_proc_general: cmdID = %s\n",
+			vfe40_general_cmd[cmd->id]);
+		rc = vfe40_stop_recording(pmctl, vfe40_ctrl);
+		break;
+	case VFE_CMD_OPERATION_CFG: {
+		if (cmd->length != V40_OPERATION_CFG_LEN) {
+			rc = -EINVAL;
+			goto proc_general_done;
+		}
+		cmdp = kmalloc(V40_OPERATION_CFG_LEN, GFP_ATOMIC);
+		if (!cmdp) {
+			rc = -ENOMEM;
+			goto proc_general_done;
+		}
+		if (copy_from_user(cmdp,
+			(void __user *)(cmd->value),
+			V40_OPERATION_CFG_LEN)) {
+			rc = -EFAULT;
+			goto proc_general_done;
+		}
+		rc = vfe40_operation_config(cmdp, vfe40_ctrl);
+		}
+		break;
+
+	case VFE_CMD_STATS_AE_START: {
+		rc = vfe_stats_aec_buf_init(vfe40_ctrl, NULL);
+		if (rc < 0) {
+			pr_err("%s: cannot config ping/pong address of AEC",
+				 __func__);
+			goto proc_general_done;
+		}
+		cmdp = kmalloc(cmd->length, GFP_ATOMIC);
+		if (!cmdp) {
+			rc = -ENOMEM;
+			goto proc_general_done;
+		}
+		if (copy_from_user(cmdp,
+			(void __user *)(cmd->value),
+			cmd->length)) {
+			rc = -EFAULT;
+			goto proc_general_done;
+		}
+		old_val = msm_camera_io_r(
+			vfe40_ctrl->share_ctrl->vfebase + VFE_MODULE_CFG);
+		old_val |= BG_ENABLE_MASK;
+		msm_camera_io_w(old_val,
+			vfe40_ctrl->share_ctrl->vfebase + VFE_MODULE_CFG);
+		msm_camera_io_memcpy(
+			vfe40_ctrl->share_ctrl->vfebase +
+			vfe40_cmd[cmd->id].offset,
+			cmdp, (vfe40_cmd[cmd->id].length));
+		}
+		break;
+	case VFE_CMD_STATS_AF_START: {
+		rc = vfe_stats_af_buf_init(vfe40_ctrl, NULL);
+		if (rc < 0) {
+			pr_err("%s: cannot config ping/pong address of AF",
+				__func__);
+			goto proc_general_done;
+		}
+		cmdp = kmalloc(cmd->length, GFP_ATOMIC);
+		if (!cmdp) {
+			rc = -ENOMEM;
+			goto proc_general_done;
+		}
+		if (copy_from_user(cmdp,
+			(void __user *)(cmd->value),
+			cmd->length)) {
+			rc = -EFAULT;
+			goto proc_general_done;
+		}
+		old_val = msm_camera_io_r(vfe40_ctrl->share_ctrl->vfebase +
+			VFE_MODULE_CFG);
+		old_val |= BF_ENABLE_MASK;
+		msm_camera_io_w(old_val,
+			vfe40_ctrl->share_ctrl->vfebase + VFE_MODULE_CFG);
+		msm_camera_io_memcpy(
+			vfe40_ctrl->share_ctrl->vfebase +
+			vfe40_cmd[cmd->id].offset,
+			cmdp, (vfe40_cmd[cmd->id].length));
+		}
+		break;
+
+	case VFE_CMD_STATS_AWB_START: {
+		rc = vfe_stats_awb_buf_init(vfe40_ctrl, NULL);
+		if (rc < 0) {
+			pr_err("%s: cannot config ping/pong address of AWB",
+				 __func__);
+			goto proc_general_done;
+		}
+		cmdp = kmalloc(cmd->length, GFP_ATOMIC);
+		if (!cmdp) {
+			rc = -ENOMEM;
+			goto proc_general_done;
+		}
+		if (copy_from_user(cmdp,
+			(void __user *)(cmd->value),
+			cmd->length)) {
+			rc = -EFAULT;
+			goto proc_general_done;
+		}
+		old_val = msm_camera_io_r(
+			vfe40_ctrl->share_ctrl->vfebase + VFE_MODULE_CFG);
+		old_val |= AWB_ENABLE_MASK;
+		msm_camera_io_w(old_val,
+			vfe40_ctrl->share_ctrl->vfebase + VFE_MODULE_CFG);
+		msm_camera_io_memcpy(
+			vfe40_ctrl->share_ctrl->vfebase +
+			vfe40_cmd[cmd->id].offset,
+			cmdp, (vfe40_cmd[cmd->id].length));
+		}
+		break;
+
+	case VFE_CMD_STATS_IHIST_START: {
+		rc = vfe_stats_ihist_buf_init(vfe40_ctrl, NULL);
+		if (rc < 0) {
+			pr_err("%s: cannot config ping/pong address of IHIST",
+				 __func__);
+			goto proc_general_done;
+		}
+		cmdp = kmalloc(cmd->length, GFP_ATOMIC);
+		if (!cmdp) {
+			rc = -ENOMEM;
+			goto proc_general_done;
+		}
+		if (copy_from_user(cmdp,
+			(void __user *)(cmd->value),
+			cmd->length)) {
+			rc = -EFAULT;
+			goto proc_general_done;
+		}
+		old_val = msm_camera_io_r(
+			vfe40_ctrl->share_ctrl->vfebase + VFE_MODULE_CFG);
+		old_val |= IHIST_ENABLE_MASK;
+		msm_camera_io_w(old_val,
+			vfe40_ctrl->share_ctrl->vfebase + VFE_MODULE_CFG);
+		msm_camera_io_memcpy(
+			vfe40_ctrl->share_ctrl->vfebase +
+			vfe40_cmd[cmd->id].offset,
+			cmdp, (vfe40_cmd[cmd->id].length));
+		}
+		break;
+
+
+	case VFE_CMD_STATS_RS_START: {
+		rc = vfe_stats_rs_buf_init(vfe40_ctrl, NULL);
+		if (rc < 0) {
+			pr_err("%s: cannot config ping/pong address of RS",
+				__func__);
+			goto proc_general_done;
+		}
+		cmdp = kmalloc(cmd->length, GFP_ATOMIC);
+		if (!cmdp) {
+			rc = -ENOMEM;
+			goto proc_general_done;
+		}
+		if (copy_from_user(cmdp,
+			(void __user *)(cmd->value),
+			cmd->length)) {
+			rc = -EFAULT;
+			goto proc_general_done;
+		}
+		msm_camera_io_memcpy(
+			vfe40_ctrl->share_ctrl->vfebase +
+			vfe40_cmd[cmd->id].offset,
+			cmdp, (vfe40_cmd[cmd->id].length));
+		}
+		break;
+
+	case VFE_CMD_STATS_CS_START: {
+		rc = vfe_stats_cs_buf_init(vfe40_ctrl, NULL);
+		if (rc < 0) {
+			pr_err("%s: cannot config ping/pong address of CS",
+				__func__);
+			goto proc_general_done;
+		}
+		cmdp = kmalloc(cmd->length, GFP_ATOMIC);
+		if (!cmdp) {
+			rc = -ENOMEM;
+			goto proc_general_done;
+		}
+		if (copy_from_user(cmdp,
+			(void __user *)(cmd->value),
+			cmd->length)) {
+			rc = -EFAULT;
+			goto proc_general_done;
+		}
+		msm_camera_io_memcpy(
+			vfe40_ctrl->share_ctrl->vfebase +
+			vfe40_cmd[cmd->id].offset,
+			cmdp, (vfe40_cmd[cmd->id].length));
+		}
+		break;
+
+	case VFE_CMD_MCE_UPDATE:
+	case VFE_CMD_MCE_CFG:{
+		cmdp = kmalloc(cmd->length, GFP_ATOMIC);
+		/* Read the second register (offset +4), which holds the
+		mce_enable bit */
+		old_val = msm_camera_io_r(vfe40_ctrl->share_ctrl->vfebase +
+			V40_CHROMA_SUP_OFF + 4);
+		if (!cmdp) {
+			rc = -ENOMEM;
+			goto proc_general_done;
+		}
+		if (copy_from_user(cmdp,
+			(void __user *)(cmd->value),
+			cmd->length)) {
+			rc = -EFAULT;
+			goto proc_general_done;
+		}
+		cmdp_local = cmdp;
+		new_val = *cmdp_local;
+		old_val &= MCE_EN_MASK;
+		new_val = new_val | old_val;
+		msm_camera_io_memcpy(
+			vfe40_ctrl->share_ctrl->vfebase +
+			V40_CHROMA_SUP_OFF + 4, &new_val, 4);
+		cmdp_local += 1;
+
+		old_val = msm_camera_io_r(vfe40_ctrl->share_ctrl->vfebase +
+			V40_CHROMA_SUP_OFF + 8);
+		new_val = *cmdp_local;
+		old_val &= MCE_Q_K_MASK;
+		new_val = new_val | old_val;
+		msm_camera_io_memcpy(
+			vfe40_ctrl->share_ctrl->vfebase +
+			V40_CHROMA_SUP_OFF + 8, &new_val, 4);
+		cmdp_local += 1;
+		msm_camera_io_memcpy(
+			vfe40_ctrl->share_ctrl->vfebase +
+			vfe40_cmd[cmd->id].offset,
+			cmdp_local, (vfe40_cmd[cmd->id].length));
+		}
+		break;
+	case VFE_CMD_CHROMA_SUP_UPDATE:
+	case VFE_CMD_CHROMA_SUP_CFG:{
+		cmdp = kmalloc(cmd->length, GFP_ATOMIC);
+		if (!cmdp) {
+			rc = -ENOMEM;
+			goto proc_general_done;
+		}
+		if (copy_from_user(cmdp,
+			(void __user *)(cmd->value),
+			cmd->length)) {
+			rc = -EFAULT;
+			goto proc_general_done;
+		}
+		cmdp_local = cmdp;
+		msm_camera_io_memcpy(vfe40_ctrl->share_ctrl->vfebase +
+			V40_CHROMA_SUP_OFF, cmdp_local, 4);
+
+		cmdp_local += 1;
+		new_val = *cmdp_local;
+		/* Read the second register (offset +4), which holds the
+		 * mce_enable bit
+		 */
+		old_val = msm_camera_io_r(vfe40_ctrl->share_ctrl->vfebase +
+			V40_CHROMA_SUP_OFF + 4);
+		old_val &= ~MCE_EN_MASK;
+		new_val = new_val | old_val;
+		msm_camera_io_memcpy(
+			vfe40_ctrl->share_ctrl->vfebase +
+			V40_CHROMA_SUP_OFF + 4, &new_val, 4);
+		cmdp_local += 1;
+
+		old_val = msm_camera_io_r(vfe40_ctrl->share_ctrl->vfebase +
+			V40_CHROMA_SUP_OFF + 8);
+		new_val = *cmdp_local;
+		old_val &= ~MCE_Q_K_MASK;
+		new_val = new_val | old_val;
+		msm_camera_io_memcpy(
+			vfe40_ctrl->share_ctrl->vfebase +
+			V40_CHROMA_SUP_OFF + 8, &new_val, 4);
+		}
+		break;
+	case VFE_CMD_BLACK_LEVEL_CFG:
+		rc = -EFAULT;
+		goto proc_general_done;
+
+	case VFE_CMD_LA_CFG:
+		cmdp = kmalloc(cmd->length, GFP_ATOMIC);
+		if (!cmdp) {
+			rc = -ENOMEM;
+			goto proc_general_done;
+		}
+		if (copy_from_user(cmdp,
+			(void __user *)(cmd->value),
+			cmd->length)) {
+
+			rc = -EFAULT;
+			goto proc_general_done;
+		}
+		cmdp_local = cmdp;
+		msm_camera_io_memcpy(
+			vfe40_ctrl->share_ctrl->vfebase +
+			vfe40_cmd[cmd->id].offset,
+			cmdp_local, (vfe40_cmd[cmd->id].length));
+
+		cmdp_local += 1;
+		vfe40_write_la_cfg(LUMA_ADAPT_LUT_RAM_BANK0,
+						   cmdp_local, vfe40_ctrl);
+		break;
+
+	case VFE_CMD_LA_UPDATE: {
+		cmdp = kmalloc(cmd->length, GFP_ATOMIC);
+		if (!cmdp) {
+			rc = -ENOMEM;
+			goto proc_general_done;
+		}
+		if (copy_from_user(cmdp,
+			(void __user *)(cmd->value),
+			cmd->length)) {
+
+			rc = -EFAULT;
+			goto proc_general_done;
+		}
+
+		cmdp_local = cmdp + 1;
+		old_val = msm_camera_io_r(
+			vfe40_ctrl->share_ctrl->vfebase + V40_LA_OFF);
+		if (old_val != 0x0)
+			vfe40_write_la_cfg(LUMA_ADAPT_LUT_RAM_BANK0,
+				cmdp_local, vfe40_ctrl);
+		else
+			vfe40_write_la_cfg(LUMA_ADAPT_LUT_RAM_BANK1,
+				cmdp_local, vfe40_ctrl);
+		}
+		vfe40_ctrl->update_la = true;
+		break;
+
+	case VFE_CMD_GET_LA_TABLE:
+		temp1 = sizeof(uint32_t) * VFE40_LA_TABLE_LENGTH / 2;
+		if (cmd->length != temp1) {
+			rc = -EINVAL;
+			goto proc_general_done;
+		}
+		cmdp = kzalloc(temp1, GFP_KERNEL);
+		if (!cmdp) {
+			rc = -ENOMEM;
+			goto proc_general_done;
+		}
+		cmdp_local = cmdp;
+		if (msm_camera_io_r(vfe40_ctrl->
+				share_ctrl->vfebase + V40_LA_OFF))
+			vfe40_program_dmi_cfg(LUMA_ADAPT_LUT_RAM_BANK1,
+						vfe40_ctrl);
+		else
+			vfe40_program_dmi_cfg(LUMA_ADAPT_LUT_RAM_BANK0,
+						vfe40_ctrl);
+		for (i = 0 ; i < (VFE40_LA_TABLE_LENGTH / 2) ; i++) {
+			*cmdp_local =
+				msm_camera_io_r(
+					vfe40_ctrl->share_ctrl->vfebase +
+					VFE_DMI_DATA_LO);
+			*cmdp_local |= (msm_camera_io_r(
+				vfe40_ctrl->share_ctrl->vfebase +
+				VFE_DMI_DATA_LO)) << 16;
+			cmdp_local++;
+		}
+		vfe40_program_dmi_cfg(NO_MEM_SELECTED, vfe40_ctrl);
+		if (copy_to_user((void __user *)(cmd->value), cmdp,
+			temp1)) {
+			rc = -EFAULT;
+			goto proc_general_done;
+		}
+		break;
+	case VFE_CMD_SK_ENHAN_CFG:
+	case VFE_CMD_SK_ENHAN_UPDATE:{
+		cmdp = kmalloc(cmd->length, GFP_ATOMIC);
+		if (!cmdp) {
+			rc = -ENOMEM;
+			goto proc_general_done;
+		}
+		if (copy_from_user(cmdp,
+			(void __user *)(cmd->value),
+			cmd->length)) {
+			rc = -EFAULT;
+			goto proc_general_done;
+		}
+		msm_camera_io_memcpy(
+			vfe40_ctrl->share_ctrl->vfebase + V40_SCE_OFF,
+			cmdp, V40_SCE_LEN);
+		}
+		break;
+
+	case VFE_CMD_LIVESHOT:
+		/* Configure primary channel */
+		rc = vfe40_configure_pingpong_buffers(VFE_MSG_CAPTURE,
+					VFE_MSG_OUTPUT_PRIMARY, vfe40_ctrl);
+		if (rc < 0) {
+			pr_err(
+			"%s error configuring pingpong buffers for primary output\n",
+			__func__);
+			rc = -EINVAL;
+			goto proc_general_done;
+		}
+		vfe40_start_liveshot(pmctl, vfe40_ctrl);
+		break;
+
+	case VFE_CMD_LINEARIZATION_CFG:
+		cmdp = kmalloc(cmd->length, GFP_ATOMIC);
+		if (!cmdp) {
+			rc = -ENOMEM;
+			goto proc_general_done;
+		}
+		if (copy_from_user(cmdp, (void __user *)(cmd->value),
+			cmd->length)) {
+			rc = -EFAULT;
+			goto proc_general_done;
+		}
+		cmdp_local = cmdp;
+		msm_camera_io_memcpy(
+			vfe40_ctrl->share_ctrl->vfebase +
+			V40_LINEARIZATION_OFF1,
+			cmdp_local, V40_LINEARIZATION_LEN1);
+
+		cmdp_local = cmdp + 17;
+		vfe40_write_linear_cfg(BLACK_LUT_RAM_BANK0,
+					cmdp_local, vfe40_ctrl);
+		break;
+
+	case VFE_CMD_LINEARIZATION_UPDATE:
+		cmdp = kmalloc(cmd->length, GFP_ATOMIC);
+		if (!cmdp) {
+			rc = -ENOMEM;
+			goto proc_general_done;
+		}
+		if (copy_from_user(cmdp, (void __user *)(cmd->value),
+			cmd->length)) {
+			rc = -EFAULT;
+			goto proc_general_done;
+		}
+		cmdp_local = cmdp;
+		cmdp_local++;
+		msm_camera_io_memcpy(
+			vfe40_ctrl->share_ctrl->vfebase +
+			V40_LINEARIZATION_OFF1 + 4,
+			cmdp_local, (V40_LINEARIZATION_LEN1 - 4));
+		cmdp_local = cmdp + 17;
+		/*extracting the bank select*/
+		old_val = msm_camera_io_r(
+				vfe40_ctrl->share_ctrl->vfebase +
+				V40_LINEARIZATION_OFF1);
+
+		if (old_val != 0x0)
+			vfe40_write_linear_cfg(BLACK_LUT_RAM_BANK0,
+						cmdp_local, vfe40_ctrl);
+		else
+			vfe40_write_linear_cfg(BLACK_LUT_RAM_BANK1,
+						cmdp_local, vfe40_ctrl);
+		vfe40_ctrl->update_linear = true;
+		break;
+
+	case VFE_CMD_GET_LINEARIZATON_TABLE:
+		temp1 = sizeof(uint32_t) * VFE40_LINEARIZATON_TABLE_LENGTH;
+		if (cmd->length != temp1) {
+			rc = -EINVAL;
+			goto proc_general_done;
+		}
+		cmdp = kzalloc(temp1, GFP_KERNEL);
+		if (!cmdp) {
+			rc = -ENOMEM;
+			goto proc_general_done;
+		}
+		cmdp_local = cmdp;
+		if (msm_camera_io_r(
+			vfe40_ctrl->share_ctrl->vfebase +
+			V40_LINEARIZATION_OFF1))
+			vfe40_program_dmi_cfg(BLACK_LUT_RAM_BANK1, vfe40_ctrl);
+		else
+			vfe40_program_dmi_cfg(BLACK_LUT_RAM_BANK0, vfe40_ctrl);
+		CDBG("%s: Linearization Table\n", __func__);
+		for (i = 0 ; i < VFE40_LINEARIZATON_TABLE_LENGTH ; i++) {
+			*cmdp_local = msm_camera_io_r(
+				vfe40_ctrl->share_ctrl->vfebase +
+				VFE_DMI_DATA_LO);
+			CDBG("%s: %08x\n", __func__, *cmdp_local);
+			cmdp_local++;
+		}
+		vfe40_program_dmi_cfg(NO_MEM_SELECTED, vfe40_ctrl);
+		if (copy_to_user((void __user *)(cmd->value), cmdp,
+			temp1)) {
+			rc = -EFAULT;
+			goto proc_general_done;
+		}
+		break;
+	case VFE_CMD_DEMOSAICV3:
+		if (cmd->length !=
+			V40_DEMOSAICV3_0_LEN+V40_DEMOSAICV3_1_LEN) {
+			rc = -EFAULT;
+			goto proc_general_done;
+		}
+		cmdp = kmalloc(cmd->length, GFP_ATOMIC);
+		if (!cmdp) {
+			rc = -ENOMEM;
+			goto proc_general_done;
+		}
+		if (copy_from_user(cmdp,
+			(void __user *)(cmd->value),
+			cmd->length)) {
+			rc = -EFAULT;
+			goto proc_general_done;
+		}
+		cmdp_local = cmdp;
+		new_val = *cmdp_local;
+
+		old_val = msm_camera_io_r(
+			vfe40_ctrl->share_ctrl->vfebase + V40_DEMOSAICV3_0_OFF);
+		old_val &= DEMOSAIC_MASK;
+		new_val = new_val | old_val;
+		*cmdp_local = new_val;
+
+		msm_camera_io_memcpy(
+			vfe40_ctrl->share_ctrl->vfebase + V40_DEMOSAICV3_0_OFF,
+			cmdp_local, V40_DEMOSAICV3_0_LEN);
+		cmdp_local += 1;
+		msm_camera_io_memcpy(
+			vfe40_ctrl->share_ctrl->vfebase + V40_DEMOSAICV3_1_OFF,
+			cmdp_local, V40_DEMOSAICV3_1_LEN);
+		break;
+
+	case VFE_CMD_DEMOSAICV3_UPDATE:
+		if (cmd->length !=
+			V40_DEMOSAICV3_0_LEN * V40_DEMOSAICV3_UP_REG_CNT) {
+			rc = -EFAULT;
+			goto proc_general_done;
+		}
+		cmdp = kmalloc(cmd->length, GFP_ATOMIC);
+		if (!cmdp) {
+			rc = -ENOMEM;
+			goto proc_general_done;
+		}
+		if (copy_from_user(cmdp,
+			(void __user *)(cmd->value),
+			cmd->length)) {
+			rc = -EFAULT;
+			goto proc_general_done;
+		}
+		cmdp_local = cmdp;
+		new_val = *cmdp_local;
+
+		old_val = msm_camera_io_r(
+			vfe40_ctrl->share_ctrl->vfebase + V40_DEMOSAICV3_0_OFF);
+		old_val &= DEMOSAIC_MASK;
+		new_val = new_val | old_val;
+		*cmdp_local = new_val;
+
+		msm_camera_io_memcpy(
+			vfe40_ctrl->share_ctrl->vfebase + V40_DEMOSAICV3_0_OFF,
+			cmdp_local, V40_DEMOSAICV3_0_LEN);
+		/* The register space is not contiguous: advance past the
+		 * word just written before copying to the next block */
+		cmdp_local += 1;
+		msm_camera_io_memcpy(
+			vfe40_ctrl->share_ctrl->vfebase + V40_DEMOSAICV3_1_OFF,
+			cmdp_local, 2 * V40_DEMOSAICV3_0_LEN);
+		/* The register space is not contiguous: advance by two
+		 * words before copying to the next block */
+		cmdp_local += 2;
+		msm_camera_io_memcpy(
+			vfe40_ctrl->share_ctrl->vfebase + V40_DEMOSAICV3_2_OFF,
+			cmdp_local, 2 * V40_DEMOSAICV3_0_LEN);
+		break;
+
+	case VFE_CMD_DEMOSAICV3_ABCC_CFG:
+		rc = -EFAULT;
+		break;
+
+	case VFE_CMD_DEMOSAICV3_ABF_UPDATE:/* 116 ABF update  */
+	case VFE_CMD_DEMOSAICV3_ABF_CFG: { /* 108 ABF config  */
+		cmdp = kmalloc(cmd->length, GFP_ATOMIC);
+		if (!cmdp) {
+			rc = -ENOMEM;
+			goto proc_general_done;
+		}
+		if (copy_from_user(cmdp,
+			(void __user *)(cmd->value),
+			cmd->length)) {
+			rc = -EFAULT;
+			goto proc_general_done;
+		}
+		cmdp_local = cmdp;
+		new_val = *cmdp_local;
+
+		old_val = msm_camera_io_r(
+			vfe40_ctrl->share_ctrl->vfebase + V40_DEMOSAICV3_0_OFF);
+		old_val &= ABF_MASK;
+		new_val = new_val | old_val;
+		*cmdp_local = new_val;
+
+		msm_camera_io_memcpy(
+			vfe40_ctrl->share_ctrl->vfebase + V40_DEMOSAICV3_0_OFF,
+			cmdp_local, 4);
+
+		cmdp_local += 1;
+		msm_camera_io_memcpy(
+			vfe40_ctrl->share_ctrl->vfebase +
+			vfe40_cmd[cmd->id].offset,
+			cmdp_local, (vfe40_cmd[cmd->id].length));
+		}
+		break;
+
+	case VFE_CMD_DEMOSAICV3_DBCC_CFG:
+	case VFE_CMD_DEMOSAICV3_DBCC_UPDATE:
+		cmdp = kmalloc(cmd->length, GFP_ATOMIC);
+		if (!cmdp) {
+			rc = -ENOMEM;
+			goto proc_general_done;
+		}
+		if (copy_from_user(cmdp,
+			(void __user *)(cmd->value),
+			cmd->length)) {
+			rc = -EFAULT;
+			goto proc_general_done;
+		}
+		cmdp_local = cmdp;
+		new_val = *cmdp_local;
+
+		old_val = msm_camera_io_r(
+			vfe40_ctrl->share_ctrl->vfebase + V40_DEMOSAICV3_0_OFF);
+		old_val &= DBCC_MASK;
+
+		new_val = new_val | old_val;
+		*cmdp_local = new_val;
+		msm_camera_io_memcpy(
+			vfe40_ctrl->share_ctrl->vfebase + V40_DEMOSAICV3_0_OFF,
+			cmdp_local, 4);
+		cmdp_local += 1;
+		msm_camera_io_memcpy(
+			vfe40_ctrl->share_ctrl->vfebase +
+			vfe40_cmd[cmd->id].offset,
+			cmdp_local, (vfe40_cmd[cmd->id].length));
+		break;
+
+	case VFE_CMD_DEMOSAICV3_DBPC_CFG:
+	case VFE_CMD_DEMOSAICV3_DBPC_UPDATE:
+		cmdp = kmalloc(cmd->length, GFP_ATOMIC);
+		if (!cmdp) {
+			rc = -ENOMEM;
+			goto proc_general_done;
+		}
+		if (copy_from_user(cmdp,
+			(void __user *)(cmd->value),
+			cmd->length)) {
+			rc = -EFAULT;
+			goto proc_general_done;
+		}
+		cmdp_local = cmdp;
+		new_val = *cmdp_local;
+
+		old_val = msm_camera_io_r(
+			vfe40_ctrl->share_ctrl->vfebase + V40_DEMOSAICV3_0_OFF);
+		old_val &= DBPC_MASK;
+
+		new_val = new_val | old_val;
+		*cmdp_local = new_val;
+		msm_camera_io_memcpy(vfe40_ctrl->share_ctrl->vfebase +
+			V40_DEMOSAICV3_0_OFF,
+			cmdp_local, V40_DEMOSAICV3_0_LEN);
+		cmdp_local += 1;
+		msm_camera_io_memcpy(vfe40_ctrl->share_ctrl->vfebase +
+			V40_DEMOSAICV3_DBPC_CFG_OFF,
+			cmdp_local, V40_DEMOSAICV3_DBPC_LEN);
+		cmdp_local += 1;
+		msm_camera_io_memcpy(vfe40_ctrl->share_ctrl->vfebase +
+			V40_DEMOSAICV3_DBPC_CFG_OFF0,
+			cmdp_local, V40_DEMOSAICV3_DBPC_LEN);
+		cmdp_local += 1;
+		msm_camera_io_memcpy(vfe40_ctrl->share_ctrl->vfebase +
+			V40_DEMOSAICV3_DBPC_CFG_OFF1,
+			cmdp_local, V40_DEMOSAICV3_DBPC_LEN);
+		cmdp_local += 1;
+		msm_camera_io_memcpy(vfe40_ctrl->share_ctrl->vfebase +
+			V40_DEMOSAICV3_DBPC_CFG_OFF2,
+			cmdp_local, V40_DEMOSAICV3_DBPC_LEN);
+		break;
+
+	case VFE_CMD_RGB_G_CFG: {
+		cmdp = kmalloc(cmd->length, GFP_ATOMIC);
+		if (!cmdp) {
+			rc = -ENOMEM;
+			goto proc_general_done;
+		}
+		if (copy_from_user(cmdp,
+			(void __user *)(cmd->value),
+			cmd->length)) {
+			rc = -EFAULT;
+			goto proc_general_done;
+		}
+		msm_camera_io_memcpy(
+			vfe40_ctrl->share_ctrl->vfebase + V40_RGB_G_OFF,
+			cmdp, 4);
+		cmdp += 1;
+
+		vfe40_write_gamma_cfg(RGBLUT_RAM_CH0_BANK0, cmdp, vfe40_ctrl);
+		vfe40_write_gamma_cfg(RGBLUT_RAM_CH1_BANK0, cmdp, vfe40_ctrl);
+		vfe40_write_gamma_cfg(RGBLUT_RAM_CH2_BANK0, cmdp, vfe40_ctrl);
+		}
+		cmdp -= 1;
+		break;
+
+	case VFE_CMD_RGB_G_UPDATE: {
+		cmdp = kmalloc(cmd->length, GFP_ATOMIC);
+		if (!cmdp) {
+			rc = -ENOMEM;
+			goto proc_general_done;
+		}
+		if (copy_from_user(cmdp, (void __user *)(cmd->value),
+			cmd->length)) {
+			rc = -EFAULT;
+			goto proc_general_done;
+		}
+
+		old_val = msm_camera_io_r(
+			vfe40_ctrl->share_ctrl->vfebase + V40_RGB_G_OFF);
+		cmdp += 1;
+		if (old_val != 0x0) {
+			vfe40_write_gamma_cfg(
+				RGBLUT_RAM_CH0_BANK0, cmdp, vfe40_ctrl);
+			vfe40_write_gamma_cfg(
+				RGBLUT_RAM_CH1_BANK0, cmdp, vfe40_ctrl);
+			vfe40_write_gamma_cfg(
+				RGBLUT_RAM_CH2_BANK0, cmdp, vfe40_ctrl);
+		} else {
+			vfe40_write_gamma_cfg(
+				RGBLUT_RAM_CH0_BANK1, cmdp, vfe40_ctrl);
+			vfe40_write_gamma_cfg(
+				RGBLUT_RAM_CH1_BANK1, cmdp, vfe40_ctrl);
+			vfe40_write_gamma_cfg(
+				RGBLUT_RAM_CH2_BANK1, cmdp, vfe40_ctrl);
+		}
+		}
+		vfe40_ctrl->update_gamma = TRUE;
+		cmdp -= 1;
+		break;
+
+	case VFE_CMD_GET_RGB_G_TABLE:
+		temp1 = sizeof(uint32_t) * VFE40_GAMMA_NUM_ENTRIES * 3;
+		if (cmd->length != temp1) {
+			rc = -EINVAL;
+			goto proc_general_done;
+		}
+		cmdp = kzalloc(temp1, GFP_KERNEL);
+		if (!cmdp) {
+			rc = -ENOMEM;
+			goto proc_general_done;
+		}
+		cmdp_local = cmdp;
+
+		old_val = msm_camera_io_r(
+			vfe40_ctrl->share_ctrl->vfebase + V40_RGB_G_OFF);
+		temp2 = old_val ? RGBLUT_RAM_CH0_BANK1 :
+			RGBLUT_RAM_CH0_BANK0;
+		for (i = 0; i < 3; i++) {
+			vfe40_read_gamma_cfg(temp2,
+				cmdp_local + (VFE40_GAMMA_NUM_ENTRIES * i),
+				vfe40_ctrl);
+			temp2 += 2;
+		}
+		if (copy_to_user((void __user *)(cmd->value), cmdp,
+			temp1)) {
+			rc = -EFAULT;
+			goto proc_general_done;
+		}
+		break;
+
+	case VFE_CMD_STATS_AWB_STOP: {
+		old_val = msm_camera_io_r(
+			vfe40_ctrl->share_ctrl->vfebase + VFE_MODULE_CFG);
+		old_val &= ~AWB_ENABLE_MASK;
+		msm_camera_io_w(old_val,
+			vfe40_ctrl->share_ctrl->vfebase + VFE_MODULE_CFG);
+		}
+		break;
+
+	case VFE_CMD_STATS_AE_STOP: {
+		old_val = msm_camera_io_r(
+			vfe40_ctrl->share_ctrl->vfebase + VFE_MODULE_CFG);
+		old_val &= ~BG_ENABLE_MASK;
+		msm_camera_io_w(old_val,
+			vfe40_ctrl->share_ctrl->vfebase + VFE_MODULE_CFG);
+		}
+		break;
+	case VFE_CMD_STATS_AF_STOP: {
+		old_val = msm_camera_io_r(
+			vfe40_ctrl->share_ctrl->vfebase + VFE_MODULE_CFG);
+		old_val &= ~BF_ENABLE_MASK;
+		msm_camera_io_w(old_val,
+			vfe40_ctrl->share_ctrl->vfebase + VFE_MODULE_CFG);
+		rc = vfe40_stats_flush_enqueue(vfe40_ctrl, MSM_STATS_TYPE_AF);
+		if (rc < 0) {
+			pr_err("%s: dq stats buf err = %d",
+				   __func__, rc);
+			return -EINVAL;
+		}
+		}
+		break;
+
+	case VFE_CMD_STATS_IHIST_STOP: {
+		old_val = msm_camera_io_r(
+			vfe40_ctrl->share_ctrl->vfebase + VFE_MODULE_CFG);
+		old_val &= ~IHIST_ENABLE_MASK;
+		msm_camera_io_w(old_val,
+			vfe40_ctrl->share_ctrl->vfebase + VFE_MODULE_CFG);
+		}
+		break;
+
+	case VFE_CMD_STATS_RS_STOP: {
+		old_val = msm_camera_io_r(
+			vfe40_ctrl->share_ctrl->vfebase + VFE_MODULE_CFG);
+		old_val &= ~RS_ENABLE_MASK;
+		msm_camera_io_w(old_val,
+			vfe40_ctrl->share_ctrl->vfebase + VFE_MODULE_CFG);
+		}
+		break;
+
+	case VFE_CMD_STATS_CS_STOP: {
+		old_val = msm_camera_io_r(
+			vfe40_ctrl->share_ctrl->vfebase + VFE_MODULE_CFG);
+		old_val &= ~CS_ENABLE_MASK;
+		msm_camera_io_w(old_val,
+			vfe40_ctrl->share_ctrl->vfebase + VFE_MODULE_CFG);
+		}
+		break;
+	case VFE_CMD_STOP:
+		CDBG("vfe40_proc_general: cmdID = %s\n",
+			vfe40_general_cmd[cmd->id]);
+		vfe40_stop(vfe40_ctrl);
+		break;
+
+	case VFE_CMD_SYNC_TIMER_SETTING:
+		cmdp = kmalloc(cmd->length, GFP_ATOMIC);
+		if (!cmdp) {
+			rc = -ENOMEM;
+			goto proc_general_done;
+		}
+		if (copy_from_user(cmdp, (void __user *)(cmd->value),
+			cmd->length)) {
+			rc = -EFAULT;
+			goto proc_general_done;
+		}
+		vfe40_sync_timer_start(cmdp, vfe40_ctrl);
+		break;
+
+	case VFE_CMD_MODULE_CFG: {
+		cmdp = kmalloc(cmd->length, GFP_ATOMIC);
+		if (!cmdp) {
+			rc = -ENOMEM;
+			goto proc_general_done;
+		}
+		if (copy_from_user(cmdp,
+			(void __user *)(cmd->value),
+			cmd->length)) {
+			rc = -EFAULT;
+			goto proc_general_done;
+		}
+		*cmdp &= ~STATS_ENABLE_MASK;
+		old_val = msm_camera_io_r(
+			vfe40_ctrl->share_ctrl->vfebase + VFE_MODULE_CFG);
+		old_val &= STATS_ENABLE_MASK;
+		*cmdp |= old_val;
+
+		msm_camera_io_memcpy(
+			vfe40_ctrl->share_ctrl->vfebase +
+			vfe40_cmd[cmd->id].offset,
+			cmdp, (vfe40_cmd[cmd->id].length));
+		}
+		break;
+
+	case VFE_CMD_ZSL:
+		rc = vfe40_configure_pingpong_buffers(VFE_MSG_START,
+			VFE_MSG_OUTPUT_PRIMARY, vfe40_ctrl);
+		if (rc < 0)
+			goto proc_general_done;
+		rc = vfe40_configure_pingpong_buffers(VFE_MSG_START,
+			VFE_MSG_OUTPUT_SECONDARY, vfe40_ctrl);
+		if (rc < 0)
+			goto proc_general_done;
+
+		rc = vfe40_zsl(pmctl, vfe40_ctrl);
+		break;
+
+	case VFE_CMD_ASF_CFG:
+	case VFE_CMD_ASF_UPDATE:
+		cmdp = kmalloc(cmd->length, GFP_ATOMIC);
+		if (!cmdp) {
+			rc = -ENOMEM;
+			goto proc_general_done;
+		}
+		if (copy_from_user(cmdp, (void __user *)(cmd->value),
+			cmd->length)) {
+			rc = -EFAULT;
+			goto proc_general_done;
+		}
+		msm_camera_io_memcpy(
+			vfe40_ctrl->share_ctrl->vfebase +
+			vfe40_cmd[cmd->id].offset,
+			cmdp, (vfe40_cmd[cmd->id].length));
+		cmdp_local = cmdp + V40_ASF_LEN/4;
+		msm_camera_io_memcpy(
+			vfe40_ctrl->share_ctrl->vfebase +
+			V40_ASF_SPECIAL_EFX_CFG_OFF,
+			cmdp_local, V40_ASF_SPECIAL_EFX_CFG_LEN);
+		break;
+
+	case VFE_CMD_GET_HW_VERSION:
+		if (cmd->length != V40_GET_HW_VERSION_LEN) {
+			rc = -EINVAL;
+			goto proc_general_done;
+		}
+		cmdp = kmalloc(V40_GET_HW_VERSION_LEN, GFP_ATOMIC);
+		if (!cmdp) {
+			rc = -ENOMEM;
+			goto proc_general_done;
+		}
+		*cmdp = msm_camera_io_r(
+			vfe40_ctrl->share_ctrl->vfebase+V40_GET_HW_VERSION_OFF);
+		if (copy_to_user((void __user *)(cmd->value), cmdp,
+			V40_GET_HW_VERSION_LEN)) {
+			rc = -EFAULT;
+			goto proc_general_done;
+		}
+		break;
+	case VFE_CMD_GET_REG_DUMP:
+		temp1 = sizeof(uint32_t) *
+			vfe40_ctrl->share_ctrl->register_total;
+		if (cmd->length != temp1) {
+			rc = -EINVAL;
+			goto proc_general_done;
+		}
+		cmdp = kmalloc(temp1, GFP_ATOMIC);
+		if (!cmdp) {
+			rc = -ENOMEM;
+			goto proc_general_done;
+		}
+		msm_camera_io_dump(vfe40_ctrl->share_ctrl->vfebase,
+			vfe40_ctrl->share_ctrl->register_total*4);
+		CDBG("%s: %p %p %d\n", __func__, (void *)cmdp,
+			vfe40_ctrl->share_ctrl->vfebase, temp1);
+		memcpy_fromio((void *)cmdp,
+			vfe40_ctrl->share_ctrl->vfebase, temp1);
+		if (copy_to_user((void __user *)(cmd->value), cmdp, temp1)) {
+			rc = -EFAULT;
+			goto proc_general_done;
+		}
+		break;
+	case VFE_CMD_FRAME_SKIP_CFG:
+		if (cmd->length != vfe40_cmd[cmd->id].length)
+			return -EINVAL;
+
+		cmdp = kmalloc(vfe40_cmd[cmd->id].length, GFP_ATOMIC);
+		if (!cmdp) {
+			rc = -ENOMEM;
+			goto proc_general_done;
+		}
+
+		if (copy_from_user((cmdp), (void __user *)cmd->value,
+				cmd->length)) {
+			rc = -EFAULT;
+			pr_err("%s copy from user failed for cmd %d",
+				__func__, cmd->id);
+			break;
+		}
+
+		msm_camera_io_memcpy(
+			vfe40_ctrl->share_ctrl->vfebase +
+			vfe40_cmd[cmd->id].offset,
+			cmdp, (vfe40_cmd[cmd->id].length));
+		vfe40_ctrl->frame_skip_cnt = ((uint32_t)
+			*cmdp & VFE_FRAME_SKIP_PERIOD_MASK) + 1;
+		vfe40_ctrl->frame_skip_pattern = (uint32_t)(*(cmdp + 2));
+		break;
+	default:
+		if (cmd->length != vfe40_cmd[cmd->id].length)
+			return -EINVAL;
+
+		cmdp = kmalloc(vfe40_cmd[cmd->id].length, GFP_ATOMIC);
+		if (!cmdp) {
+			rc = -ENOMEM;
+			goto proc_general_done;
+		}
+
+		if (copy_from_user((cmdp), (void __user *)cmd->value,
+				cmd->length)) {
+			rc = -EFAULT;
+			pr_err("%s copy from user failed for cmd %d",
+				__func__, cmd->id);
+			goto proc_general_done;
+		}
+		msm_camera_io_memcpy(
+			vfe40_ctrl->share_ctrl->vfebase +
+			vfe40_cmd[cmd->id].offset,
+			cmdp, (vfe40_cmd[cmd->id].length));
+		break;
+
+	}
+
+proc_general_done:
+	kfree(cmdp);
+
+	return rc;
+}
+
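+/*
+ * Latch the VFE and CAMIF IRQ status registers, then clear the pending
+ * interrupts and issue the IRQ clear command.
+ */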
+static inline void vfe40_read_irq_status(
+	struct axi_ctrl_t *axi_ctrl, struct vfe40_irq_status *out)
+{
+	uint32_t *temp;
+	memset(out, 0, sizeof(struct vfe40_irq_status));
+	temp = (uint32_t *)(axi_ctrl->share_ctrl->vfebase + VFE_IRQ_STATUS_0);
+	out->vfeIrqStatus0 = msm_camera_io_r(temp);
+
+	temp = (uint32_t *)(axi_ctrl->share_ctrl->vfebase + VFE_IRQ_STATUS_1);
+	out->vfeIrqStatus1 = msm_camera_io_r(temp);
+
+	temp = (uint32_t *)(axi_ctrl->share_ctrl->vfebase + VFE_CAMIF_STATUS);
+	out->camifStatus = msm_camera_io_r(temp);
+	CDBG("camifStatus  = 0x%x\n", out->camifStatus);
+
+	/* clear the pending interrupt of the same kind.*/
+	msm_camera_io_w(out->vfeIrqStatus0,
+		axi_ctrl->share_ctrl->vfebase + VFE_IRQ_CLEAR_0);
+	msm_camera_io_w(out->vfeIrqStatus1,
+		axi_ctrl->share_ctrl->vfebase + VFE_IRQ_CLEAR_1);
+
+	/* Ensure the write order while writing
+	to the command register using the barrier */
+	msm_camera_io_w_mb(1, axi_ctrl->share_ctrl->vfebase + VFE_IRQ_CMD);
+
+}
+
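+/*
+ * Reg-update IRQ: advance the recording and liveshot state machines,
+ * toggle the relevant AXI write masters and send start/stop/update acks
+ * to the ISP layer.
+ */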
+static void vfe40_process_reg_update_irq(
+		struct vfe40_ctrl_type *vfe40_ctrl)
+{
+	unsigned long flags;
+
+	if (vfe40_ctrl->recording_state == VFE_STATE_START_REQUESTED) {
+		if (vfe40_ctrl->share_ctrl->operation_mode ==
+				VFE_OUTPUTS_VIDEO_AND_PREVIEW) {
+			msm_camera_io_w(1, vfe40_ctrl->share_ctrl->vfebase +
+				vfe40_AXI_WM_CFG[vfe40_ctrl->
+				share_ctrl->outpath.out0.ch0]);
+			msm_camera_io_w(1, vfe40_ctrl->share_ctrl->vfebase +
+				vfe40_AXI_WM_CFG[vfe40_ctrl->
+				share_ctrl->outpath.out0.ch1]);
+		} else if (vfe40_ctrl->share_ctrl->operation_mode ==
+				VFE_OUTPUTS_PREVIEW_AND_VIDEO) {
+			msm_camera_io_w(1, vfe40_ctrl->share_ctrl->vfebase +
+				vfe40_AXI_WM_CFG[vfe40_ctrl->
+				share_ctrl->outpath.out1.ch0]);
+			msm_camera_io_w(1, vfe40_ctrl->share_ctrl->vfebase +
+				vfe40_AXI_WM_CFG[vfe40_ctrl->
+				share_ctrl->outpath.out1.ch1]);
+		}
+		vfe40_ctrl->recording_state = VFE_STATE_STARTED;
+		msm_camera_io_w_mb(1,
+			vfe40_ctrl->share_ctrl->vfebase + VFE_REG_UPDATE_CMD);
+		CDBG("start video triggered.\n");
+	} else if (vfe40_ctrl->recording_state ==
+			VFE_STATE_STOP_REQUESTED) {
+		if (vfe40_ctrl->share_ctrl->operation_mode ==
+				VFE_OUTPUTS_VIDEO_AND_PREVIEW) {
+			msm_camera_io_w(0, vfe40_ctrl->share_ctrl->vfebase +
+				vfe40_AXI_WM_CFG[vfe40_ctrl->
+				share_ctrl->outpath.out0.ch0]);
+			msm_camera_io_w(0, vfe40_ctrl->share_ctrl->vfebase +
+				vfe40_AXI_WM_CFG[vfe40_ctrl->
+				share_ctrl->outpath.out0.ch1]);
+		} else if (vfe40_ctrl->share_ctrl->operation_mode ==
+				VFE_OUTPUTS_PREVIEW_AND_VIDEO) {
+			msm_camera_io_w(0, vfe40_ctrl->share_ctrl->vfebase +
+				vfe40_AXI_WM_CFG[vfe40_ctrl->
+				share_ctrl->outpath.out1.ch0]);
+			msm_camera_io_w(0, vfe40_ctrl->share_ctrl->vfebase +
+				vfe40_AXI_WM_CFG[vfe40_ctrl->
+				share_ctrl->outpath.out1.ch1]);
+		}
+		CDBG("stop video triggered .\n");
+	}
+
+	if (vfe40_ctrl->start_ack_pending == TRUE) {
+		vfe40_send_isp_msg(&vfe40_ctrl->subdev,
+			vfe40_ctrl->share_ctrl->vfeFrameId, MSG_ID_START_ACK);
+		vfe40_ctrl->start_ack_pending = FALSE;
+	} else {
+		if (vfe40_ctrl->recording_state ==
+				VFE_STATE_STOP_REQUESTED) {
+			vfe40_ctrl->recording_state = VFE_STATE_STOPPED;
+			/* request a reg update and send STOP_REC_ACK
+			 * when we process the next reg update irq.
+			 */
+			msm_camera_io_w_mb(1,
+			vfe40_ctrl->share_ctrl->vfebase + VFE_REG_UPDATE_CMD);
+		} else if (vfe40_ctrl->recording_state ==
+					VFE_STATE_STOPPED) {
+			vfe40_send_isp_msg(&vfe40_ctrl->subdev,
+				vfe40_ctrl->share_ctrl->vfeFrameId,
+				MSG_ID_STOP_REC_ACK);
+			vfe40_ctrl->recording_state = VFE_STATE_IDLE;
+		}
+		spin_lock_irqsave(&vfe40_ctrl->update_ack_lock, flags);
+		if (vfe40_ctrl->update_ack_pending == TRUE) {
+			vfe40_ctrl->update_ack_pending = FALSE;
+			spin_unlock_irqrestore(
+				&vfe40_ctrl->update_ack_lock, flags);
+			vfe40_send_isp_msg(&vfe40_ctrl->subdev,
+				vfe40_ctrl->share_ctrl->vfeFrameId,
+				MSG_ID_UPDATE_ACK);
+		} else {
+			spin_unlock_irqrestore(
+				&vfe40_ctrl->update_ack_lock, flags);
+		}
+	}
+
+	if (vfe40_ctrl->share_ctrl->liveshot_state ==
+		VFE_STATE_START_REQUESTED) {
+		CDBG("%s enabling liveshot output\n", __func__);
+		if (vfe40_ctrl->share_ctrl->outpath.output_mode &
+				VFE40_OUTPUT_MODE_PRIMARY) {
+			msm_camera_io_w(1, vfe40_ctrl->share_ctrl->vfebase +
+				vfe40_AXI_WM_CFG[vfe40_ctrl->
+				share_ctrl->outpath.out0.ch0]);
+			msm_camera_io_w(1, vfe40_ctrl->share_ctrl->vfebase +
+			vfe40_AXI_WM_CFG[vfe40_ctrl->
+				share_ctrl->outpath.out0.ch1]);
+			vfe40_ctrl->share_ctrl->liveshot_state =
+				VFE_STATE_STARTED;
+		}
+	}
+
+	if (vfe40_ctrl->share_ctrl->liveshot_state == VFE_STATE_STARTED) {
+		vfe40_ctrl->share_ctrl->vfe_capture_count--;
+		if (!vfe40_ctrl->share_ctrl->vfe_capture_count)
+			vfe40_ctrl->share_ctrl->liveshot_state =
+				VFE_STATE_STOP_REQUESTED;
+		msm_camera_io_w_mb(1, vfe40_ctrl->
+			share_ctrl->vfebase + VFE_REG_UPDATE_CMD);
+	} else if (vfe40_ctrl->share_ctrl->liveshot_state ==
+			VFE_STATE_STOP_REQUESTED) {
+		CDBG("%s: disabling liveshot output\n", __func__);
+		if (vfe40_ctrl->share_ctrl->outpath.output_mode &
+			VFE40_OUTPUT_MODE_PRIMARY) {
+			msm_camera_io_w(0, vfe40_ctrl->share_ctrl->vfebase +
+				vfe40_AXI_WM_CFG[vfe40_ctrl->
+				share_ctrl->outpath.out0.ch0]);
+			msm_camera_io_w(0, vfe40_ctrl->share_ctrl->vfebase +
+				vfe40_AXI_WM_CFG[vfe40_ctrl->
+				share_ctrl->outpath.out0.ch1]);
+			vfe40_ctrl->share_ctrl->liveshot_state =
+				VFE_STATE_STOPPED;
+			msm_camera_io_w_mb(1, vfe40_ctrl->share_ctrl->vfebase +
+				VFE_REG_UPDATE_CMD);
+		}
+	} else if (vfe40_ctrl->share_ctrl->liveshot_state ==
+			VFE_STATE_STOPPED) {
+		vfe40_ctrl->share_ctrl->liveshot_state = VFE_STATE_IDLE;
+	}
+
+	if ((vfe40_ctrl->share_ctrl->operation_mode ==
+			VFE_OUTPUTS_THUMB_AND_MAIN) ||
+		(vfe40_ctrl->share_ctrl->operation_mode ==
+			VFE_OUTPUTS_MAIN_AND_THUMB) ||
+		(vfe40_ctrl->share_ctrl->operation_mode ==
+			VFE_OUTPUTS_THUMB_AND_JPEG) ||
+		(vfe40_ctrl->share_ctrl->operation_mode ==
+			VFE_OUTPUTS_JPEG_AND_THUMB)) {
+		/* in snapshot mode */
+		/* later we need to add check for live snapshot mode. */
+		if (vfe40_ctrl->frame_skip_pattern & (0x1 <<
+			(vfe40_ctrl->snapshot_frame_cnt %
+				vfe40_ctrl->frame_skip_cnt))) {
+			vfe40_ctrl->share_ctrl->vfe_capture_count--;
+			/* if last frame to be captured: */
+			if (vfe40_ctrl->share_ctrl->vfe_capture_count == 0) {
+				/* stop the bus output:write master enable = 0*/
+				if (vfe40_ctrl->share_ctrl->outpath.output_mode
+					& VFE40_OUTPUT_MODE_PRIMARY) {
+					msm_camera_io_w(0,
+						vfe40_ctrl->share_ctrl->vfebase+
+						vfe40_AXI_WM_CFG[vfe40_ctrl->
+						share_ctrl->outpath.out0.ch0]);
+					msm_camera_io_w(0,
+						vfe40_ctrl->share_ctrl->vfebase+
+						vfe40_AXI_WM_CFG[vfe40_ctrl->
+						share_ctrl->outpath.out0.ch1]);
+				}
+				if (vfe40_ctrl->share_ctrl->outpath.output_mode&
+						VFE40_OUTPUT_MODE_SECONDARY) {
+					msm_camera_io_w(0,
+						vfe40_ctrl->share_ctrl->vfebase+
+						vfe40_AXI_WM_CFG[vfe40_ctrl->
+						share_ctrl->outpath.out1.ch0]);
+					msm_camera_io_w(0,
+						vfe40_ctrl->share_ctrl->vfebase+
+						vfe40_AXI_WM_CFG[vfe40_ctrl->
+						share_ctrl->outpath.out1.ch1]);
+				}
+				msm_camera_io_w_mb
+				(CAMIF_COMMAND_STOP_AT_FRAME_BOUNDARY,
+				vfe40_ctrl->share_ctrl->vfebase +
+				VFE_CAMIF_COMMAND);
+				vfe40_ctrl->snapshot_frame_cnt = -1;
+				vfe40_ctrl->frame_skip_cnt = 31;
+				vfe40_ctrl->frame_skip_pattern = 0xffffffff;
+			} /*if snapshot count is 0*/
+		} /*if frame is not being dropped*/
+		vfe40_ctrl->snapshot_frame_cnt++;
+		/* then do reg_update. */
+		msm_camera_io_w(1,
+			vfe40_ctrl->share_ctrl->vfebase + VFE_REG_UPDATE_CMD);
+	} /* if snapshot mode. */
+}
+
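+/* Program default register values after a reset: demux gains, CGC override,
+ * output clamp limits and the stats bus UB configuration. */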
+static void vfe40_set_default_reg_values(
+			struct vfe40_ctrl_type *vfe40_ctrl)
+{
+	msm_camera_io_w(0x800080,
+		vfe40_ctrl->share_ctrl->vfebase + VFE_DEMUX_GAIN_0);
+	msm_camera_io_w(0x800080,
+		vfe40_ctrl->share_ctrl->vfebase + VFE_DEMUX_GAIN_1);
+	/* What value should we program CGC_OVERRIDE to? */
+	msm_camera_io_w(0xFFFFF,
+		vfe40_ctrl->share_ctrl->vfebase + VFE_CGC_OVERRIDE);
+
+	/* default output clamp min/max values */
+	msm_camera_io_w(0, vfe40_ctrl->share_ctrl->vfebase + VFE_CLAMP_ENC_MIN);
+	msm_camera_io_w(0xFFFFFF,
+		vfe40_ctrl->share_ctrl->vfebase + VFE_CLAMP_ENC_MAX);
+	msm_camera_io_w(0,
+		vfe40_ctrl->share_ctrl->vfebase + VFE_CLAMP_VIEW_MIN);
+	msm_camera_io_w(0xFFFFFF,
+		vfe40_ctrl->share_ctrl->vfebase + VFE_CLAMP_VIEW_MAX);
+
+	/* stats UB config */
+	msm_camera_io_w(0x3980007,
+		vfe40_ctrl->share_ctrl->vfebase + VFE_BUS_STATS_AEC_UB_CFG);
+	msm_camera_io_w(0x3A00007,
+		vfe40_ctrl->share_ctrl->vfebase + VFE_BUS_STATS_AF_UB_CFG);
+	msm_camera_io_w(0x3A8000F,
+		vfe40_ctrl->share_ctrl->vfebase + VFE_BUS_STATS_AWB_UB_CFG);
+	msm_camera_io_w(0x3B80007,
+		vfe40_ctrl->share_ctrl->vfebase + VFE_BUS_STATS_RS_UB_CFG);
+	msm_camera_io_w(0x3C0001F,
+		vfe40_ctrl->share_ctrl->vfebase + VFE_BUS_STATS_CS_UB_CFG);
+	msm_camera_io_w(0x3E0001F,
+		vfe40_ctrl->share_ctrl->vfebase + VFE_BUS_STATS_HIST_UB_CFG);
+}
+
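+/* Reset-ack irq: if a stop was pending, ack the stop; otherwise the reset
+ * command completed, so restore default register values, reload all write
+ * masters and send MSG_ID_RESET_ACK. */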
+static void vfe40_process_reset_irq(
+		struct vfe40_ctrl_type *vfe40_ctrl)
+{
+	unsigned long flags;
+
+	atomic_set(&vfe40_ctrl->share_ctrl->vstate, 0);
+
+	spin_lock_irqsave(&vfe40_ctrl->share_ctrl->stop_flag_lock, flags);
+	if (vfe40_ctrl->share_ctrl->stop_ack_pending) {
+		vfe40_ctrl->share_ctrl->stop_ack_pending = FALSE;
+		spin_unlock_irqrestore(
+			&vfe40_ctrl->share_ctrl->stop_flag_lock, flags);
+		vfe40_send_isp_msg(&vfe40_ctrl->subdev,
+			vfe40_ctrl->share_ctrl->vfeFrameId, MSG_ID_STOP_ACK);
+	} else {
+		spin_unlock_irqrestore(
+			&vfe40_ctrl->share_ctrl->stop_flag_lock, flags);
+		/* this is from reset command. */
+		vfe40_set_default_reg_values(vfe40_ctrl);
+
+		/* reload all write masters. (frame & line)*/
+		msm_camera_io_w(0x7FFF,
+			vfe40_ctrl->share_ctrl->vfebase + VFE_BUS_CMD);
+		vfe40_send_isp_msg(&vfe40_ctrl->subdev,
+			vfe40_ctrl->share_ctrl->vfeFrameId, MSG_ID_RESET_ACK);
+	}
+}
+
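+/* CAMIF SOF irq: in raw capture mode, count down the remaining frames and
+ * stop CAMIF at the frame boundary when done; then advance the frame id and
+ * send MSG_ID_SOF_ACK (skipped as required when HFR is enabled). */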
+static void vfe40_process_camif_sof_irq(
+		struct vfe40_ctrl_type *vfe40_ctrl)
+{
+	if (vfe40_ctrl->share_ctrl->operation_mode ==
+		VFE_OUTPUTS_RAW) {
+		if (vfe40_ctrl->start_ack_pending) {
+			vfe40_send_isp_msg(&vfe40_ctrl->subdev,
+				vfe40_ctrl->share_ctrl->vfeFrameId,
+				MSG_ID_START_ACK);
+			vfe40_ctrl->start_ack_pending = FALSE;
+		}
+		vfe40_ctrl->share_ctrl->vfe_capture_count--;
+		/* if last frame to be captured: */
+		if (vfe40_ctrl->share_ctrl->vfe_capture_count == 0) {
+			/* Ensure the write order while writing
+			 to the command register using the barrier */
+			msm_camera_io_w_mb(CAMIF_COMMAND_STOP_AT_FRAME_BOUNDARY,
+			vfe40_ctrl->share_ctrl->vfebase + VFE_CAMIF_COMMAND);
+		}
+	} /* if raw snapshot mode. */
+	if ((vfe40_ctrl->hfr_mode != HFR_MODE_OFF) &&
+		(vfe40_ctrl->share_ctrl->operation_mode ==
+			VFE_MODE_OF_OPERATION_VIDEO) &&
+		(vfe40_ctrl->share_ctrl->vfeFrameId %
+			vfe40_ctrl->hfr_mode != 0)) {
+		vfe40_ctrl->share_ctrl->vfeFrameId++;
+		CDBG("Skip the SOF notification when HFR enabled\n");
+		return;
+	}
+	vfe40_ctrl->share_ctrl->vfeFrameId++;
+	vfe40_send_isp_msg(&vfe40_ctrl->subdev,
+		vfe40_ctrl->share_ctrl->vfeFrameId, MSG_ID_SOF_ACK);
+	CDBG("camif_sof_irq, frameId = %d\n",
+		vfe40_ctrl->share_ctrl->vfeFrameId);
+
+	if (vfe40_ctrl->sync_timer_state) {
+		if (vfe40_ctrl->sync_timer_repeat_count == 0)
+			vfe40_sync_timer_stop(vfe40_ctrl);
+		else
+			vfe40_ctrl->sync_timer_repeat_count--;
+	}
+}
+
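+/* Decode the error irq status and log each asserted error condition. */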
+static void vfe40_process_error_irq(
+	struct axi_ctrl_t *axi_ctrl, uint32_t errStatus)
+{
+	uint32_t reg_value;
+
+	if (errStatus & VFE40_IMASK_CAMIF_ERROR) {
+		pr_err("vfe40_irq: camif errors\n");
+		reg_value = msm_camera_io_r(
+			axi_ctrl->share_ctrl->vfebase + VFE_CAMIF_STATUS);
+		pr_err("camifStatus  = 0x%x\n", reg_value);
+		vfe40_send_isp_msg(&axi_ctrl->subdev,
+			axi_ctrl->share_ctrl->vfeFrameId, MSG_ID_CAMIF_ERROR);
+	}
+
+	if (errStatus & VFE40_IMASK_BHIST_OVWR)
+		pr_err("vfe40_irq: stats bhist overwrite\n");
+
+	if (errStatus & VFE40_IMASK_STATS_CS_OVWR)
+		pr_err("vfe40_irq: stats cs overwrite\n");
+
+	if (errStatus & VFE40_IMASK_STATS_IHIST_OVWR)
+		pr_err("vfe40_irq: stats ihist overwrite\n");
+
+	if (errStatus & VFE40_IMASK_REALIGN_BUF_Y_OVFL)
+		pr_err("vfe40_irq: realign buf Y overflow\n");
+
+	if (errStatus & VFE40_IMASK_REALIGN_BUF_CB_OVFL)
+		pr_err("vfe40_irq: realign buf CB overflow\n");
+
+	if (errStatus & VFE40_IMASK_REALIGN_BUF_CR_OVFL)
+		pr_err("vfe40_irq: realign buf CR overflow\n");
+
+	if (errStatus & VFE40_IMASK_VIOLATION) {
+		pr_err("vfe40_irq: violation interrupt\n");
+		reg_value = msm_camera_io_r(
+			axi_ctrl->share_ctrl->vfebase + VFE_VIOLATION_STATUS);
+		pr_err("%s: violationStatus  = 0x%x\n", __func__, reg_value);
+	}
+
+	if (errStatus & VFE40_IMASK_IMG_MAST_0_BUS_OVFL)
+		pr_err("vfe40_irq: image master 0 bus overflow\n");
+
+	if (errStatus & VFE40_IMASK_IMG_MAST_1_BUS_OVFL)
+		pr_err("vfe40_irq: image master 1 bus overflow\n");
+
+	if (errStatus & VFE40_IMASK_IMG_MAST_2_BUS_OVFL)
+		pr_err("vfe40_irq: image master 2 bus overflow\n");
+
+	if (errStatus & VFE40_IMASK_IMG_MAST_3_BUS_OVFL)
+		pr_err("vfe40_irq: image master 3 bus overflow\n");
+
+	if (errStatus & VFE40_IMASK_IMG_MAST_4_BUS_OVFL)
+		pr_err("vfe40_irq: image master 4 bus overflow\n");
+
+	if (errStatus & VFE40_IMASK_IMG_MAST_5_BUS_OVFL)
+		pr_err("vfe40_irq: image master 5 bus overflow\n");
+
+	if (errStatus & VFE40_IMASK_IMG_MAST_6_BUS_OVFL)
+		pr_err("vfe40_irq: image master 6 bus overflow\n");
+
+	if (errStatus & VFE40_IMASK_STATS_AE_BG_BUS_OVFL)
+		pr_err("vfe40_irq: ae/bg stats bus overflow\n");
+
+	if (errStatus & VFE40_IMASK_STATS_AF_BF_BUS_OVFL)
+		pr_err("vfe40_irq: af/bf stats bus overflow\n");
+
+	if (errStatus & VFE40_IMASK_STATS_AWB_BUS_OVFL)
+		pr_err("vfe40_irq: awb stats bus overflow\n");
+
+	if (errStatus & VFE40_IMASK_STATS_RS_BUS_OVFL)
+		pr_err("vfe40_irq: rs stats bus overflow\n");
+
+	if (errStatus & VFE40_IMASK_STATS_CS_BUS_OVFL)
+		pr_err("vfe40_irq: cs stats bus overflow\n");
+
+	if (errStatus & VFE40_IMASK_STATS_IHIST_BUS_OVFL)
+		pr_err("vfe40_irq: ihist stats bus overflow\n");
+
+	if (errStatus & VFE40_IMASK_STATS_SKIN_BHIST_BUS_OVFL)
+		pr_err("vfe40_irq: skin/bhist stats bus overflow\n");
+
+	if (errStatus & VFE40_IMASK_AXI_ERROR)
+		pr_err("vfe40_irq: axi error\n");
+}
+
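+/* Swap a stats ping/pong buffer: figure out which buffer the hardware just
+ * filled, return its address to the caller and write newAddr into that slot
+ * so the next frame of this stats type has a destination. */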
+static uint32_t  vfe40_process_stats_irq_common(
+	struct vfe40_ctrl_type *vfe40_ctrl,
+	uint32_t statsNum, uint32_t newAddr)
+{
+	uint32_t pingpongStatus;
+	uint32_t returnAddr;
+	uint32_t pingpongAddr;
+
+	/* must be 0=ping, 1=pong */
+	pingpongStatus =
+		((msm_camera_io_r(vfe40_ctrl->share_ctrl->vfebase +
+		VFE_BUS_PING_PONG_STATUS))
+	& ((uint32_t)(1<<(statsNum + 7)))) >> (statsNum + 7);
+	/* stats bits start at bit 7 */
+	CDBG("statsNum %d, pingpongStatus %d\n", statsNum, pingpongStatus);
+	pingpongAddr =
+		((uint32_t)(vfe40_ctrl->share_ctrl->vfebase +
+				VFE_BUS_STATS_PING_PONG_BASE)) +
+				(3*statsNum)*4 + (1-pingpongStatus)*4;
+	returnAddr = msm_camera_io_r((uint32_t *)pingpongAddr);
+	msm_camera_io_w(newAddr, (uint32_t *)pingpongAddr);
+	return returnAddr;
+}
+
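+/* Translate the finished stats buffer address into a queue index/vaddr via
+ * the stats ops dispatcher and notify the v4l2 layer with the MSG_ID that
+ * matches the stats type. */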
+static void
+vfe_send_stats_msg(struct vfe40_ctrl_type *vfe40_ctrl,
+	uint32_t bufAddress, uint32_t statsNum)
+{
+	int rc = 0;
+	void *vaddr = NULL;
+	/* fill message with right content. */
+	/* @todo This is causing issues, need further investigate */
+	/* spin_lock_irqsave(&ctrl->state_lock, flags); */
+	struct isp_msg_stats msgStats;
+	msgStats.frameCounter = vfe40_ctrl->share_ctrl->vfeFrameId;
+	msgStats.buffer = bufAddress;
+
+	switch (statsNum) {
+	case statsAeNum:{
+		msgStats.id = MSG_ID_STATS_AEC;
+		rc = vfe40_ctrl->stats_ops.dispatch(
+				vfe40_ctrl->stats_ops.stats_ctrl,
+				MSM_STATS_TYPE_AEC, bufAddress,
+				&msgStats.buf_idx, &vaddr, &msgStats.fd,
+				vfe40_ctrl->stats_ops.client);
+		}
+		break;
+	case statsAfNum:{
+		msgStats.id = MSG_ID_STATS_AF;
+		rc = vfe40_ctrl->stats_ops.dispatch(
+				vfe40_ctrl->stats_ops.stats_ctrl,
+				MSM_STATS_TYPE_AF, bufAddress,
+				&msgStats.buf_idx, &vaddr, &msgStats.fd,
+				vfe40_ctrl->stats_ops.client);
+		}
+		break;
+	case statsAwbNum: {
+		msgStats.id = MSG_ID_STATS_AWB;
+		rc = vfe40_ctrl->stats_ops.dispatch(
+				vfe40_ctrl->stats_ops.stats_ctrl,
+				MSM_STATS_TYPE_AWB, bufAddress,
+				&msgStats.buf_idx, &vaddr, &msgStats.fd,
+				vfe40_ctrl->stats_ops.client);
+		}
+		break;
+
+	case statsIhistNum: {
+		msgStats.id = MSG_ID_STATS_IHIST;
+		rc = vfe40_ctrl->stats_ops.dispatch(
+				vfe40_ctrl->stats_ops.stats_ctrl,
+				MSM_STATS_TYPE_IHIST, bufAddress,
+				&msgStats.buf_idx, &vaddr, &msgStats.fd,
+				vfe40_ctrl->stats_ops.client);
+		}
+		break;
+	case statsRsNum: {
+		msgStats.id = MSG_ID_STATS_RS;
+		rc = vfe40_ctrl->stats_ops.dispatch(
+				vfe40_ctrl->stats_ops.stats_ctrl,
+				MSM_STATS_TYPE_RS, bufAddress,
+				&msgStats.buf_idx, &vaddr, &msgStats.fd,
+				vfe40_ctrl->stats_ops.client);
+		}
+		break;
+	case statsCsNum: {
+		msgStats.id = MSG_ID_STATS_CS;
+		rc = vfe40_ctrl->stats_ops.dispatch(
+				vfe40_ctrl->stats_ops.stats_ctrl,
+				MSM_STATS_TYPE_CS, bufAddress,
+				&msgStats.buf_idx, &vaddr, &msgStats.fd,
+				vfe40_ctrl->stats_ops.client);
+		}
+		break;
+
+	default:
+		goto stats_done;
+	}
+	if (rc == 0) {
+		msgStats.buffer = (uint32_t)vaddr;
+		v4l2_subdev_notify(&vfe40_ctrl->subdev,
+			NOTIFY_VFE_MSG_STATS,
+			&msgStats);
+	} else {
+		pr_err("%s: paddr to idx mapping error, stats_id = %d, paddr = 0x%x\n",
+			 __func__, msgStats.id, msgStats.buffer);
+	}
+stats_done:
+	/* spin_unlock_irqrestore(&ctrl->state_lock, flags); */
+	return;
+}
+
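+/* Send one composite stats notification carrying the buffers rendered for
+ * every stats type in this frame, plus the AWB ymin value read back from
+ * the hardware. */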
+static void vfe_send_comp_stats_msg(
+	struct vfe40_ctrl_type *vfe40_ctrl, uint32_t status_bits)
+{
+	struct msm_stats_buf msgStats;
+	uint32_t temp;
+
+	msgStats.frame_id = vfe40_ctrl->share_ctrl->vfeFrameId;
+	msgStats.status_bits = status_bits;
+
+	msgStats.aec.buff = vfe40_ctrl->aecStatsControl.bufToRender;
+	msgStats.awb.buff = vfe40_ctrl->awbStatsControl.bufToRender;
+	msgStats.af.buff = vfe40_ctrl->afStatsControl.bufToRender;
+
+	msgStats.ihist.buff = vfe40_ctrl->ihistStatsControl.bufToRender;
+	msgStats.rs.buff = vfe40_ctrl->rsStatsControl.bufToRender;
+	msgStats.cs.buff = vfe40_ctrl->csStatsControl.bufToRender;
+
+	temp = msm_camera_io_r(
+		vfe40_ctrl->share_ctrl->vfebase + VFE_STATS_AWB_SGW_CFG);
+	msgStats.awb_ymin = (0xFF00 & temp) >> 8;
+
+	v4l2_subdev_notify(&vfe40_ctrl->subdev,
+				NOTIFY_VFE_MSG_COMP_STATS,
+				&msgStats);
+}
+
+static void vfe40_process_stats_awb_irq(struct vfe40_ctrl_type *vfe40_ctrl)
+{
+	unsigned long flags;
+	uint32_t addr;
+	spin_lock_irqsave(&vfe40_ctrl->stats_bufq_lock, flags);
+	addr = (uint32_t)vfe40_stats_dqbuf(vfe40_ctrl, MSM_STATS_TYPE_AWB);
+	spin_unlock_irqrestore(&vfe40_ctrl->stats_bufq_lock, flags);
+	if (addr) {
+		vfe40_ctrl->awbStatsControl.bufToRender =
+			vfe40_process_stats_irq_common(vfe40_ctrl, statsAwbNum,
+			addr);
+
+		vfe_send_stats_msg(vfe40_ctrl,
+			vfe40_ctrl->awbStatsControl.bufToRender, statsAwbNum);
+	} else {
+		vfe40_ctrl->awbStatsControl.droppedStatsFrameCount++;
+		CDBG("%s: droppedStatsFrameCount = %d", __func__,
+			vfe40_ctrl->awbStatsControl.droppedStatsFrameCount);
+	}
+}
+
+static void vfe40_process_stats_ihist_irq(struct vfe40_ctrl_type *vfe40_ctrl)
+{
+	unsigned long flags;
+	uint32_t addr;
+	spin_lock_irqsave(&vfe40_ctrl->stats_bufq_lock, flags);
+	addr = (uint32_t)vfe40_stats_dqbuf(vfe40_ctrl, MSM_STATS_TYPE_IHIST);
+	spin_unlock_irqrestore(&vfe40_ctrl->stats_bufq_lock, flags);
+	if (addr) {
+		vfe40_ctrl->ihistStatsControl.bufToRender =
+			vfe40_process_stats_irq_common(
+			vfe40_ctrl, statsIhistNum, addr);
+
+		vfe_send_stats_msg(vfe40_ctrl,
+			vfe40_ctrl->ihistStatsControl.bufToRender,
+			statsIhistNum);
+	} else {
+		vfe40_ctrl->ihistStatsControl.droppedStatsFrameCount++;
+		CDBG("%s: droppedStatsFrameCount = %d", __func__,
+			vfe40_ctrl->ihistStatsControl.droppedStatsFrameCount);
+	}
+}
+
+static void vfe40_process_stats_rs_irq(struct vfe40_ctrl_type *vfe40_ctrl)
+{
+	unsigned long flags;
+	uint32_t addr;
+	spin_lock_irqsave(&vfe40_ctrl->stats_bufq_lock, flags);
+	addr = (uint32_t)vfe40_stats_dqbuf(vfe40_ctrl, MSM_STATS_TYPE_RS);
+	spin_unlock_irqrestore(&vfe40_ctrl->stats_bufq_lock, flags);
+	if (addr) {
+		vfe40_ctrl->rsStatsControl.bufToRender =
+			vfe40_process_stats_irq_common(vfe40_ctrl, statsRsNum,
+			addr);
+
+		vfe_send_stats_msg(vfe40_ctrl,
+			vfe40_ctrl->rsStatsControl.bufToRender, statsRsNum);
+	} else {
+		vfe40_ctrl->rsStatsControl.droppedStatsFrameCount++;
+		CDBG("%s: droppedStatsFrameCount = %d", __func__,
+			vfe40_ctrl->rsStatsControl.droppedStatsFrameCount);
+	}
+}
+
+static void vfe40_process_stats_cs_irq(struct vfe40_ctrl_type *vfe40_ctrl)
+{
+	unsigned long flags;
+	uint32_t addr;
+	spin_lock_irqsave(&vfe40_ctrl->stats_bufq_lock, flags);
+	addr = (uint32_t)vfe40_stats_dqbuf(vfe40_ctrl, MSM_STATS_TYPE_CS);
+	spin_unlock_irqrestore(&vfe40_ctrl->stats_bufq_lock, flags);
+	if (addr) {
+		vfe40_ctrl->csStatsControl.bufToRender =
+			vfe40_process_stats_irq_common(vfe40_ctrl, statsCsNum,
+			addr);
+
+		vfe_send_stats_msg(vfe40_ctrl,
+			vfe40_ctrl->csStatsControl.bufToRender, statsCsNum);
+	} else {
+		vfe40_ctrl->csStatsControl.droppedStatsFrameCount++;
+		CDBG("%s: droppedStatsFrameCount = %d", __func__,
+			vfe40_ctrl->csStatsControl.droppedStatsFrameCount);
+	}
+}
+
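+/* Composite stats handler: for each stats type flagged in status_bits,
+ * dequeue a buffer and swap it into the ping/pong slot; send a single
+ * composite message if at least one type produced a buffer. */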
+static void vfe40_process_stats(struct vfe40_ctrl_type *vfe40_ctrl,
+	uint32_t status_bits)
+{
+	unsigned long flags;
+	int32_t process_stats = false;
+	uint32_t addr;
+
+	CDBG("%s, stats = 0x%x\n", __func__, status_bits);
+	spin_lock_irqsave(&vfe40_ctrl->stats_bufq_lock, flags);
+	if (status_bits & VFE_IRQ_STATUS0_STATS_AWB) {
+		addr = (uint32_t)vfe40_stats_dqbuf(vfe40_ctrl,
+			MSM_STATS_TYPE_AWB);
+		if (addr) {
+			vfe40_ctrl->awbStatsControl.bufToRender =
+				vfe40_process_stats_irq_common(
+				vfe40_ctrl, statsAwbNum,
+				addr);
+			process_stats = true;
+		} else {
+			vfe40_ctrl->awbStatsControl.droppedStatsFrameCount++;
+			vfe40_ctrl->awbStatsControl.bufToRender = 0;
+		}
+	} else {
+		vfe40_ctrl->awbStatsControl.bufToRender = 0;
+	}
+
+	if (status_bits & VFE_IRQ_STATUS0_STATS_IHIST) {
+		addr = (uint32_t)vfe40_stats_dqbuf(vfe40_ctrl,
+					MSM_STATS_TYPE_IHIST);
+		if (addr) {
+			vfe40_ctrl->ihistStatsControl.bufToRender =
+				vfe40_process_stats_irq_common(
+				vfe40_ctrl, statsIhistNum,
+				addr);
+			process_stats = true;
+		} else {
+			vfe40_ctrl->ihistStatsControl.droppedStatsFrameCount++;
+			vfe40_ctrl->ihistStatsControl.bufToRender = 0;
+		}
+	} else {
+		vfe40_ctrl->ihistStatsControl.bufToRender = 0;
+	}
+
+	if (status_bits & VFE_IRQ_STATUS0_STATS_RS) {
+		addr = (uint32_t)vfe40_stats_dqbuf(vfe40_ctrl,
+					MSM_STATS_TYPE_RS);
+		if (addr) {
+			vfe40_ctrl->rsStatsControl.bufToRender =
+				vfe40_process_stats_irq_common(
+				vfe40_ctrl, statsRsNum,
+				addr);
+			process_stats = true;
+		} else {
+			vfe40_ctrl->rsStatsControl.droppedStatsFrameCount++;
+			vfe40_ctrl->rsStatsControl.bufToRender = 0;
+		}
+	} else {
+		vfe40_ctrl->rsStatsControl.bufToRender = 0;
+	}
+
+	if (status_bits & VFE_IRQ_STATUS0_STATS_CS) {
+		addr = (uint32_t)vfe40_stats_dqbuf(vfe40_ctrl,
+					MSM_STATS_TYPE_CS);
+		if (addr) {
+			vfe40_ctrl->csStatsControl.bufToRender =
+				vfe40_process_stats_irq_common(
+				vfe40_ctrl, statsCsNum,
+				addr);
+			process_stats = true;
+		} else {
+			vfe40_ctrl->csStatsControl.droppedStatsFrameCount++;
+			vfe40_ctrl->csStatsControl.bufToRender = 0;
+		}
+	} else {
+		vfe40_ctrl->csStatsControl.bufToRender = 0;
+	}
+	spin_unlock_irqrestore(&vfe40_ctrl->stats_bufq_lock, flags);
+	if (process_stats)
+		vfe_send_comp_stats_msg(vfe40_ctrl, status_bits);
+
+	return;
+}
+
+static void vfe40_process_stats_irq(
+	struct vfe40_ctrl_type *vfe40_ctrl, uint32_t irqstatus)
+{
+	uint32_t status_bits = VFE_COM_STATUS & irqstatus;
+
+	if ((vfe40_ctrl->hfr_mode != HFR_MODE_OFF) &&
+		(vfe40_ctrl->share_ctrl->vfeFrameId %
+		 vfe40_ctrl->hfr_mode != 0)) {
+		CDBG("Skip the stats when HFR enabled\n");
+		return;
+	}
+
+	vfe40_process_stats(vfe40_ctrl, status_bits);
+	return;
+}
+
+static void vfe40_process_irq(
+	struct vfe40_ctrl_type *vfe40_ctrl, uint32_t irqstatus)
+{
+	if (irqstatus &
+		VFE_IRQ_STATUS0_STATS_COMPOSIT_MASK_0) {
+		vfe40_process_stats_irq(vfe40_ctrl, irqstatus);
+		return;
+	}
+
+	switch (irqstatus) {
+	case VFE_IRQ_STATUS0_CAMIF_SOF_MASK:
+		CDBG("irq	camifSofIrq\n");
+		vfe40_process_camif_sof_irq(vfe40_ctrl);
+		break;
+	case VFE_IRQ_STATUS0_REG_UPDATE_MASK:
+		CDBG("irq	regUpdateIrq\n");
+		vfe40_process_reg_update_irq(vfe40_ctrl);
+		break;
+	case VFE_IMASK_WHILE_STOPPING_0:
+		CDBG("irq	resetAckIrq\n");
+		vfe40_process_reset_irq(vfe40_ctrl);
+		break;
+	case VFE_IRQ_STATUS0_STATS_AWB:
+		CDBG("Stats AWB irq occurred.\n");
+		vfe40_process_stats_awb_irq(vfe40_ctrl);
+		break;
+	case VFE_IRQ_STATUS0_STATS_IHIST:
+		CDBG("Stats IHIST irq occurred.\n");
+		vfe40_process_stats_ihist_irq(vfe40_ctrl);
+		break;
+	case VFE_IRQ_STATUS0_STATS_RS:
+		CDBG("Stats RS irq occurred.\n");
+		vfe40_process_stats_rs_irq(vfe40_ctrl);
+		break;
+	case VFE_IRQ_STATUS0_STATS_CS:
+		CDBG("Stats CS irq occurred.\n");
+		vfe40_process_stats_cs_irq(vfe40_ctrl);
+		break;
+	case VFE_IRQ_STATUS1_SYNC_TIMER0:
+		CDBG("SYNC_TIMER 0 irq occurred.\n");
+		vfe40_send_isp_msg(&vfe40_ctrl->subdev,
+			vfe40_ctrl->share_ctrl->vfeFrameId,
+			MSG_ID_SYNC_TIMER0_DONE);
+		break;
+	case VFE_IRQ_STATUS1_SYNC_TIMER1:
+		CDBG("SYNC_TIMER 1 irq occurred.\n");
+		vfe40_send_isp_msg(&vfe40_ctrl->subdev,
+			vfe40_ctrl->share_ctrl->vfeFrameId,
+			MSG_ID_SYNC_TIMER1_DONE);
+		break;
+	case VFE_IRQ_STATUS1_SYNC_TIMER2:
+		CDBG("SYNC_TIMER 2 irq occurred.\n");
+		vfe40_send_isp_msg(&vfe40_ctrl->subdev,
+			vfe40_ctrl->share_ctrl->vfeFrameId,
+			MSG_ID_SYNC_TIMER2_DONE);
+		break;
+	default:
+		pr_err("Invalid IRQ status\n");
+	}
+}
+
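+/* Tasklet (bottom half): drains the queued irq payloads and fans them out
+ * as v4l2 subdev notifications for SOF, reg update, reset/stop, AXI and
+ * stats interrupts. */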
+static void axi40_do_tasklet(unsigned long data)
+{
+	unsigned long flags;
+	struct axi_ctrl_t *axi_ctrl = (struct axi_ctrl_t *)data;
+	struct vfe40_isr_queue_cmd *qcmd = NULL;
+
+	CDBG("=== axi40_do_tasklet start ===\n");
+
+	while (atomic_read(&axi_ctrl->share_ctrl->irq_cnt)) {
+		spin_lock_irqsave(&axi_ctrl->tasklet_lock, flags);
+		qcmd = list_first_entry(&axi_ctrl->tasklet_q,
+			struct vfe40_isr_queue_cmd, list);
+		atomic_sub(1, &axi_ctrl->share_ctrl->irq_cnt);
+
+		if (!qcmd) {
+			spin_unlock_irqrestore(&axi_ctrl->tasklet_lock,
+				flags);
+			return;
+		}
+
+		list_del(&qcmd->list);
+		spin_unlock_irqrestore(&axi_ctrl->tasklet_lock,
+			flags);
+
+		if (qcmd->vfeInterruptStatus0 &
+				VFE_IRQ_STATUS0_CAMIF_SOF_MASK)
+			v4l2_subdev_notify(&axi_ctrl->subdev,
+				NOTIFY_VFE_IRQ,
+				(void *)VFE_IRQ_STATUS0_CAMIF_SOF_MASK);
+
+		/* interrupt to be processed,  *qcmd has the payload.  */
+		if (qcmd->vfeInterruptStatus0 &
+				VFE_IRQ_STATUS0_REG_UPDATE_MASK) {
+			v4l2_subdev_notify(&axi_ctrl->subdev,
+				NOTIFY_VFE_IRQ,
+				(void *)VFE_IRQ_STATUS0_REG_UPDATE_MASK);
+		}
+
+		if (qcmd->vfeInterruptStatus0 &
+				VFE_IMASK_WHILE_STOPPING_0)
+			v4l2_subdev_notify(&axi_ctrl->subdev,
+				NOTIFY_VFE_IRQ,
+				(void *)VFE_IMASK_WHILE_STOPPING_0);
+
+		if (atomic_read(&axi_ctrl->share_ctrl->vstate)) {
+			if (qcmd->vfeInterruptStatus1 &
+					VFE40_IMASK_ERROR_ONLY_1) {
+				pr_err("irq	errorIrq\n");
+				vfe40_process_error_irq(
+					axi_ctrl,
+					qcmd->vfeInterruptStatus1 &
+					VFE40_IMASK_ERROR_ONLY_1);
+			}
+			v4l2_subdev_notify(&axi_ctrl->subdev,
+				NOTIFY_AXI_IRQ,
+				(void *)qcmd->vfeInterruptStatus0);
+
+			/* then process stats irq. */
+			if (axi_ctrl->share_ctrl->stats_comp) {
+				/* process stats comb interrupt. */
+				if (qcmd->vfeInterruptStatus0 &
+					VFE_IRQ_STATUS0_STATS_COMPOSIT_MASK_0) {
+					CDBG("Stats composite irq occurred.\n");
+					v4l2_subdev_notify(&axi_ctrl->subdev,
+					NOTIFY_VFE_IRQ,
+					(void *)qcmd->vfeInterruptStatus0);
+				}
+			} else {
+				/* process individual stats interrupt. */
+				if (qcmd->vfeInterruptStatus0 &
+						VFE_IRQ_STATUS0_STATS_AWB)
+					v4l2_subdev_notify(&axi_ctrl->subdev,
+					NOTIFY_VFE_IRQ,
+					(void *)VFE_IRQ_STATUS0_STATS_AWB);
+				if (qcmd->vfeInterruptStatus0 &
+						VFE_IRQ_STATUS0_STATS_IHIST)
+					v4l2_subdev_notify(&axi_ctrl->subdev,
+					NOTIFY_VFE_IRQ,
+					(void *)VFE_IRQ_STATUS0_STATS_IHIST);
+
+				if (qcmd->vfeInterruptStatus0 &
+						VFE_IRQ_STATUS0_STATS_RS)
+					v4l2_subdev_notify(&axi_ctrl->subdev,
+					NOTIFY_VFE_IRQ,
+					(void *)VFE_IRQ_STATUS0_STATS_RS);
+
+				if (qcmd->vfeInterruptStatus0 &
+						VFE_IRQ_STATUS0_STATS_CS)
+					v4l2_subdev_notify(&axi_ctrl->subdev,
+					NOTIFY_VFE_IRQ,
+					(void *)VFE_IRQ_STATUS0_STATS_CS);
+
+				if (qcmd->vfeInterruptStatus1 &
+						VFE_IRQ_STATUS1_SYNC_TIMER0)
+					v4l2_subdev_notify(&axi_ctrl->subdev,
+					NOTIFY_VFE_IRQ,
+					(void *)VFE_IRQ_STATUS1_SYNC_TIMER0);
+
+				if (qcmd->vfeInterruptStatus1 &
+						VFE_IRQ_STATUS1_SYNC_TIMER1)
+					v4l2_subdev_notify(&axi_ctrl->subdev,
+					NOTIFY_VFE_IRQ,
+					(void *)VFE_IRQ_STATUS1_SYNC_TIMER1);
+
+				if (qcmd->vfeInterruptStatus1 &
+						VFE_IRQ_STATUS1_SYNC_TIMER2)
+					v4l2_subdev_notify(&axi_ctrl->subdev,
+					NOTIFY_VFE_IRQ,
+					(void *)VFE_IRQ_STATUS1_SYNC_TIMER2);
+			}
+		}
+		kfree(qcmd);
+	}
+	CDBG("=== axi40_do_tasklet end ===\n");
+}
+
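+/* Top-half irq handler: reads and clears the irq status registers, queues
+ * the status payload and schedules the tasklet for deferred processing. */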
+static irqreturn_t vfe40_parse_irq(int irq_num, void *data)
+{
+	unsigned long flags;
+	struct vfe40_irq_status irq;
+	struct vfe40_isr_queue_cmd *qcmd;
+	struct axi_ctrl_t *axi_ctrl = data;
+
+	CDBG("vfe_parse_irq\n");
+
+	vfe40_read_irq_status(axi_ctrl, &irq);
+
+	if ((irq.vfeIrqStatus0 == 0) && (irq.vfeIrqStatus1 == 0)) {
+		CDBG("vfe_parse_irq: vfeIrqStatus0 & 1 are both 0!\n");
+		return IRQ_HANDLED;
+	}
+
+	qcmd = kzalloc(sizeof(struct vfe40_isr_queue_cmd),
+		GFP_ATOMIC);
+	if (!qcmd) {
+		pr_err("vfe_parse_irq: qcmd malloc failed!\n");
+		return IRQ_HANDLED;
+	}
+
+	spin_lock_irqsave(&axi_ctrl->share_ctrl->stop_flag_lock, flags);
+	if (axi_ctrl->share_ctrl->stop_ack_pending) {
+		irq.vfeIrqStatus0 &= VFE_IMASK_WHILE_STOPPING_0;
+		irq.vfeIrqStatus1 &= VFE_IMASK_WHILE_STOPPING_1;
+	}
+	spin_unlock_irqrestore(&axi_ctrl->share_ctrl->stop_flag_lock, flags);
+
+	CDBG("vfe_parse_irq: Irq_status0 = 0x%x, Irq_status1 = 0x%x.\n",
+		irq.vfeIrqStatus0, irq.vfeIrqStatus1);
+
+	qcmd->vfeInterruptStatus0 = irq.vfeIrqStatus0;
+	qcmd->vfeInterruptStatus1 = irq.vfeIrqStatus1;
+
+	spin_lock_irqsave(&axi_ctrl->tasklet_lock, flags);
+	list_add_tail(&qcmd->list, &axi_ctrl->tasklet_q);
+
+	atomic_add(1, &axi_ctrl->share_ctrl->irq_cnt);
+	spin_unlock_irqrestore(&axi_ctrl->tasklet_lock, flags);
+	tasklet_schedule(&axi_ctrl->vfe40_tasklet);
+	return IRQ_HANDLED;
+}
+
+
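+/* Stats buffer queue sub-ioctls (REQBUF/ENQUEUEBUF/FLUSH_BUFQ); the stats
+ * ops are lazily initialised on the first REQBUF. */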
+static long vfe_stats_bufq_sub_ioctl(
+	struct vfe40_ctrl_type *vfe_ctrl,
+	struct msm_vfe_cfg_cmd *cmd, void *ion_client)
+{
+	long rc = 0;
+	switch (cmd->cmd_type) {
+	case VFE_CMD_STATS_REQBUF:
+	if (!vfe_ctrl->stats_ops.stats_ctrl) {
+		/* stats_ctrl has not been init yet */
+		rc = msm_stats_buf_ops_init(&vfe_ctrl->stats_ctrl,
+				(struct ion_client *)ion_client,
+				&vfe_ctrl->stats_ops);
+		if (rc < 0) {
+			pr_err("%s: cannot init stats ops", __func__);
+			goto end;
+		}
+		rc = vfe_ctrl->stats_ops.stats_ctrl_init(&vfe_ctrl->stats_ctrl);
+		if (rc < 0) {
+			pr_err("%s: cannot init stats_ctrl ops", __func__);
+			memset(&vfe_ctrl->stats_ops, 0,
+				sizeof(vfe_ctrl->stats_ops));
+			goto end;
+		}
+		if (sizeof(struct msm_stats_reqbuf) != cmd->length) {
+			/* error: the length does not match */
+			pr_err("%s: stats reqbuf input size = %d,\n"
+				"struct size = %d, mismatch\n",
+				 __func__, cmd->length,
+				sizeof(struct msm_stats_reqbuf));
+			rc = -EINVAL;
+			goto end;
+		}
+	}
+	rc = vfe_ctrl->stats_ops.reqbuf(
+			&vfe_ctrl->stats_ctrl,
+			(struct msm_stats_reqbuf *)cmd->value,
+			vfe_ctrl->stats_ops.client);
+	break;
+	case VFE_CMD_STATS_ENQUEUEBUF:
+	if (sizeof(struct msm_stats_buf_info) != cmd->length) {
+		/* error: the length does not match */
+		pr_err("%s: stats enqueuebuf input size = %d,\n"
+			"struct size = %d, mismatch\n",
+			 __func__, cmd->length,
+			sizeof(struct msm_stats_buf_info));
+		rc = -EINVAL;
+		goto end;
+	}
+	rc = vfe_ctrl->stats_ops.enqueue_buf(
+			&vfe_ctrl->stats_ctrl,
+			(struct msm_stats_buf_info *)cmd->value,
+			vfe_ctrl->stats_ops.client);
+	break;
+	case VFE_CMD_STATS_FLUSH_BUFQ:
+	{
+		struct msm_stats_flush_bufq *flush_req = NULL;
+		flush_req = (struct msm_stats_flush_bufq *)cmd->value;
+		if (sizeof(struct msm_stats_flush_bufq) != cmd->length) {
+			/* error: the length does not match */
+			pr_err("%s: stats flush queue input size = %d,\n"
+				"struct size = %d, mismatch\n",
+				__func__, cmd->length,
+				sizeof(struct msm_stats_flush_bufq));
+			rc = -EINVAL;
+			goto end;
+		}
+	rc = vfe_ctrl->stats_ops.bufq_flush(
+			&vfe_ctrl->stats_ctrl,
+			(enum msm_stats_enum_type)flush_req->stats_type,
+			vfe_ctrl->stats_ops.client);
+	}
+	break;
+	default:
+		rc = -1;
+		pr_err("%s: cmd_type %d not supported", __func__,
+			cmd->cmd_type);
+	break;
+	}
+end:
+	return rc;
+}
+
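+/* v4l2 subdev ioctl entry point: dispatches irq processing, stats bufq
+ * sub-ioctls, ping/pong/free buffer configuration and general VFE commands. */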
+static long msm_vfe_subdev_ioctl(struct v4l2_subdev *sd,
+			unsigned int subdev_cmd, void *arg)
+{
+	struct msm_cam_media_controller *pmctl =
+		(struct msm_cam_media_controller *)v4l2_get_subdev_hostdata(sd);
+	struct vfe40_ctrl_type *vfe40_ctrl =
+		(struct vfe40_ctrl_type *)v4l2_get_subdevdata(sd);
+	struct msm_isp_cmd vfecmd;
+	struct msm_camvfe_params *vfe_params =
+		(struct msm_camvfe_params *)arg;
+	struct msm_vfe_cfg_cmd *cmd = vfe_params->vfe_cfg;
+	void *data = vfe_params->data;
+
+	long rc = 0;
+	struct vfe_cmd_stats_buf *scfg = NULL;
+	struct vfe_cmd_stats_ack *sack = NULL;
+
+	if (!vfe40_ctrl->share_ctrl->vfebase) {
+		pr_err("%s: base address unmapped\n", __func__);
+		return -EFAULT;
+	}
+
+	switch (cmd->cmd_type) {
+	case CMD_VFE_PROCESS_IRQ:
+		vfe40_process_irq(vfe40_ctrl, (uint32_t) data);
+		return rc;
+	case VFE_CMD_STATS_REQBUF:
+	case VFE_CMD_STATS_ENQUEUEBUF:
+	case VFE_CMD_STATS_FLUSH_BUFQ:
+		/* for easy porting put in one envelope */
+		rc = vfe_stats_bufq_sub_ioctl(vfe40_ctrl,
+				cmd, vfe_params->data);
+		return rc;
+	default:
+		if (cmd->cmd_type != CMD_CONFIG_PING_ADDR &&
+			cmd->cmd_type != CMD_CONFIG_PONG_ADDR &&
+			cmd->cmd_type != CMD_CONFIG_FREE_BUF_ADDR &&
+			cmd->cmd_type != CMD_STATS_AEC_BUF_RELEASE &&
+			cmd->cmd_type != CMD_STATS_AWB_BUF_RELEASE &&
+			cmd->cmd_type != CMD_STATS_IHIST_BUF_RELEASE &&
+			cmd->cmd_type != CMD_STATS_RS_BUF_RELEASE &&
+			cmd->cmd_type != CMD_STATS_CS_BUF_RELEASE &&
+			cmd->cmd_type != CMD_STATS_AF_BUF_RELEASE) {
+				if (copy_from_user(&vfecmd,
+					(void __user *)(cmd->value),
+					sizeof(vfecmd))) {
+						pr_err("%s %d: copy_from_user failed\n",
+							__func__, __LINE__);
+					return -EFAULT;
+				}
+		} else {
+			/* here it is either a stats release or a frame release. */
+			if (cmd->cmd_type != CMD_CONFIG_PING_ADDR &&
+				cmd->cmd_type != CMD_CONFIG_PONG_ADDR &&
+				cmd->cmd_type != CMD_CONFIG_FREE_BUF_ADDR) {
+				/* then must be stats release. */
+				if (!data) {
+					pr_err("%s: data = NULL, cmd->cmd_type = %d",
+						__func__, cmd->cmd_type);
+					return -EFAULT;
+				}
+				sack = kmalloc(sizeof(struct vfe_cmd_stats_ack),
+							GFP_ATOMIC);
+				if (!sack) {
+					pr_err("%s: no mem for cmd->cmd_type = %d",
+					 __func__, cmd->cmd_type);
+					return -ENOMEM;
+				}
+				sack->nextStatsBuf = *(uint32_t *)data;
+			}
+		}
+		CDBG("%s: cmdType = %d\n", __func__, cmd->cmd_type);
+
+		if ((cmd->cmd_type == CMD_STATS_AF_ENABLE)    ||
+			(cmd->cmd_type == CMD_STATS_AWB_ENABLE)   ||
+			(cmd->cmd_type == CMD_STATS_IHIST_ENABLE) ||
+			(cmd->cmd_type == CMD_STATS_RS_ENABLE)    ||
+			(cmd->cmd_type == CMD_STATS_CS_ENABLE)    ||
+			(cmd->cmd_type == CMD_STATS_AEC_ENABLE)) {
+				scfg = NULL;
+				/* individual */
+				goto vfe40_config_done;
+		}
+		switch (cmd->cmd_type) {
+		case CMD_GENERAL:
+			rc = vfe40_proc_general(pmctl, &vfecmd, vfe40_ctrl);
+		break;
+		case CMD_CONFIG_PING_ADDR: {
+			int path = *((int *)cmd->value);
+			struct vfe40_output_ch *outch =
+				vfe40_get_ch(path, vfe40_ctrl->share_ctrl);
+			outch->ping = *((struct msm_free_buf *)data);
+		}
+		break;
+
+		case CMD_CONFIG_PONG_ADDR: {
+			int path = *((int *)cmd->value);
+			struct vfe40_output_ch *outch =
+				vfe40_get_ch(path, vfe40_ctrl->share_ctrl);
+			outch->pong = *((struct msm_free_buf *)data);
+		}
+		break;
+
+		case CMD_CONFIG_FREE_BUF_ADDR: {
+			int path = *((int *)cmd->value);
+			struct vfe40_output_ch *outch =
+				vfe40_get_ch(path, vfe40_ctrl->share_ctrl);
+			outch->free_buf = *((struct msm_free_buf *)data);
+		}
+		break;
+		case CMD_SNAP_BUF_RELEASE:
+			break;
+		default:
+			pr_err("%s Unsupported AXI configuration %x ", __func__,
+				cmd->cmd_type);
+		break;
+		}
+	}
+vfe40_config_done:
+	kfree(scfg);
+	kfree(sack);
+	CDBG("%s done: rc = %d\n", __func__, (int) rc);
+	return rc;
+}
+
+static const struct v4l2_subdev_core_ops msm_vfe_subdev_core_ops = {
+	.ioctl = msm_vfe_subdev_ioctl,
+};
+
+static const struct v4l2_subdev_ops msm_vfe_subdev_ops = {
+	.core = &msm_vfe_subdev_core_ops,
+};
+
+int msm_vfe_subdev_init(struct v4l2_subdev *sd,
+			struct msm_cam_media_controller *mctl)
+{
+	int rc = 0;
+	struct vfe40_ctrl_type *vfe40_ctrl =
+		(struct vfe40_ctrl_type *)v4l2_get_subdevdata(sd);
+	v4l2_set_subdev_hostdata(sd, mctl);
+
+	spin_lock_init(&vfe40_ctrl->share_ctrl->stop_flag_lock);
+	spin_lock_init(&vfe40_ctrl->state_lock);
+	spin_lock_init(&vfe40_ctrl->io_lock);
+	spin_lock_init(&vfe40_ctrl->update_ack_lock);
+	spin_lock_init(&vfe40_ctrl->stats_bufq_lock);
+
+
+	vfe40_ctrl->update_linear = false;
+	vfe40_ctrl->update_rolloff = false;
+	vfe40_ctrl->update_la = false;
+	vfe40_ctrl->update_gamma = false;
+	vfe40_ctrl->hfr_mode = HFR_MODE_OFF;
+
+	return rc;
+}
+
+void msm_vfe_subdev_release(struct v4l2_subdev *sd)
+{
+	struct vfe40_ctrl_type *vfe40_ctrl =
+		(struct vfe40_ctrl_type *)v4l2_get_subdevdata(sd);
+	if (vfe40_ctrl->share_ctrl->vfebase)
+		vfe40_ctrl->share_ctrl->vfebase = NULL;
+}
+
+static const struct v4l2_subdev_internal_ops msm_vfe_internal_ops;
+
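+/* Platform probe: allocates the shared/axi/vfe control structures, sets up
+ * the v4l2 subdev, claims the "vfe" memory region and irq, initialises the
+ * tasklet and registers the subdev node. */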
+static int __devinit vfe40_probe(struct platform_device *pdev)
+{
+	int rc = 0;
+	struct axi_ctrl_t *axi_ctrl;
+	struct vfe40_ctrl_type *vfe40_ctrl;
+	struct vfe_share_ctrl_t *share_ctrl;
+	struct msm_cam_subdev_info sd_info;
+	CDBG("%s: device id = %d\n", __func__, pdev->id);
+
+	share_ctrl = kzalloc(sizeof(struct vfe_share_ctrl_t), GFP_KERNEL);
+	if (!share_ctrl) {
+		pr_err("%s: not enough memory\n", __func__);
+		return -ENOMEM;
+	}
+
+	axi_ctrl = kzalloc(sizeof(struct axi_ctrl_t), GFP_KERNEL);
+	if (!axi_ctrl) {
+		pr_err("%s: not enough memory\n", __func__);
+		kfree(share_ctrl);
+		return -ENOMEM;
+	}
+
+	vfe40_ctrl = kzalloc(sizeof(struct vfe40_ctrl_type), GFP_KERNEL);
+	if (!vfe40_ctrl) {
+		pr_err("%s: not enough memory\n", __func__);
+		kfree(share_ctrl);
+		kfree(axi_ctrl);
+		return -ENOMEM;
+	}
+
+	if (pdev->dev.of_node)
+		of_property_read_u32((&pdev->dev)->of_node,
+			"cell-index", &pdev->id);
+
+	share_ctrl->axi_ctrl = axi_ctrl;
+	share_ctrl->vfe40_ctrl = vfe40_ctrl;
+	axi_ctrl->share_ctrl = share_ctrl;
+	vfe40_ctrl->share_ctrl = share_ctrl;
+	axi_ctrl->pdev = pdev;
+	vfe40_axi_probe(axi_ctrl);
+
+	v4l2_subdev_init(&vfe40_ctrl->subdev, &msm_vfe_subdev_ops);
+	vfe40_ctrl->subdev.internal_ops = &msm_vfe_internal_ops;
+	vfe40_ctrl->subdev.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
+	snprintf(vfe40_ctrl->subdev.name,
+			 sizeof(vfe40_ctrl->subdev.name), "vfe4.0");
+	v4l2_set_subdevdata(&vfe40_ctrl->subdev, vfe40_ctrl);
+	platform_set_drvdata(pdev, &vfe40_ctrl->subdev);
+
+	axi_ctrl->vfemem = platform_get_resource_byname(pdev,
+					IORESOURCE_MEM, "vfe");
+	if (!axi_ctrl->vfemem) {
+		pr_err("%s: no mem resource?\n", __func__);
+		rc = -ENODEV;
+		goto vfe40_no_resource;
+	}
+	axi_ctrl->vfeirq = platform_get_resource_byname(pdev,
+					IORESOURCE_IRQ, "vfe");
+	if (!axi_ctrl->vfeirq) {
+		pr_err("%s: no irq resource?\n", __func__);
+		rc = -ENODEV;
+		goto vfe40_no_resource;
+	}
+
+	axi_ctrl->vfeio = request_mem_region(axi_ctrl->vfemem->start,
+		resource_size(axi_ctrl->vfemem), pdev->name);
+	if (!axi_ctrl->vfeio) {
+		pr_err("%s: no valid mem region\n", __func__);
+		rc = -EBUSY;
+		goto vfe40_no_resource;
+	}
+
+	rc = request_irq(axi_ctrl->vfeirq->start, vfe40_parse_irq,
+		IRQF_TRIGGER_RISING, "vfe", axi_ctrl);
+	if (rc < 0) {
+		release_mem_region(axi_ctrl->vfemem->start,
+			resource_size(axi_ctrl->vfemem));
+		pr_err("%s: irq request fail\n", __func__);
+		rc = -EBUSY;
+		goto vfe40_no_resource;
+	}
+
+	disable_irq(axi_ctrl->vfeirq->start);
+
+	tasklet_init(&axi_ctrl->vfe40_tasklet,
+		axi40_do_tasklet, (unsigned long)axi_ctrl);
+
+	vfe40_ctrl->pdev = pdev;
+	sd_info.sdev_type = VFE_DEV;
+	sd_info.sd_index = pdev->id;
+	sd_info.irq_num = axi_ctrl->vfeirq->start;
+	msm_cam_register_subdev_node(&vfe40_ctrl->subdev, &sd_info);
+	return 0;
+
+vfe40_no_resource:
+	kfree(vfe40_ctrl);
+	kfree(axi_ctrl);
+	kfree(share_ctrl);
+	return rc;
+}
+
+static const struct of_device_id msm_vfe_dt_match[] = {
+	{.compatible = "qcom,vfe40"},
+	{}
+};
+
+MODULE_DEVICE_TABLE(of, msm_vfe_dt_match);
+
+static struct platform_driver vfe40_driver = {
+	.probe = vfe40_probe,
+	.driver = {
+		.name = MSM_VFE_DRV_NAME,
+		.owner = THIS_MODULE,
+		.of_match_table = msm_vfe_dt_match,
+	},
+};
+
+static int __init msm_vfe40_init_module(void)
+{
+	return platform_driver_register(&vfe40_driver);
+}
+
+static void __exit msm_vfe40_exit_module(void)
+{
+	platform_driver_unregister(&vfe40_driver);
+}
+
+module_init(msm_vfe40_init_module);
+module_exit(msm_vfe40_exit_module);
+MODULE_DESCRIPTION("VFE 4.0 driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/video/msm/vfe/msm_vfe40.h b/drivers/media/video/msm/vfe/msm_vfe40.h
new file mode 100644
index 0000000..c8b0cb8
--- /dev/null
+++ b/drivers/media/video/msm/vfe/msm_vfe40.h
@@ -0,0 +1,1202 @@
+/* Copyright (c) 2012, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __MSM_VFE40_H__
+#define __MSM_VFE40_H__
+
+#include <linux/bitops.h>
+#include "msm_vfe_stats_buf.h"
+
+#define TRUE  1
+#define FALSE 0
+
+#define VFE40_HW_NUMBER 0x10000015
+
+/* This defines the total number of registers in the VFE.
+ * Each register is 4 bytes, so multiply this number
+ * by 4 to get the address range. */
+#define VFE40_REGISTER_TOTAL 0x00000320
+
+/* at stop of vfe pipeline, for now it is assumed
+ * that camif will stop at any time. Bits 1:0 = 10 (binary):
+ * disable image data capture immediately. */
+#define CAMIF_COMMAND_STOP_IMMEDIATELY  0x00000002
+
+/* at stop of vfe pipeline, for now it is assumed
+ * that camif will stop at any time. Bit 1:0 = 0x00:
+ * disable image data capture at frame boundary */
+#define CAMIF_COMMAND_STOP_AT_FRAME_BOUNDARY  0x00000000
+
+/* to halt axi bridge */
+#define AXI_HALT  0x00000001
+
+/* clear the halt bit. */
+#define AXI_HALT_CLEAR  0x00000000
+
+/* reset the pipeline when the stop command is issued
+ * (without resetting the registers). Bits 26-31 = 0 for
+ * domain reset, bits 0-9 = 1 for module reset, except the
+ * register module (bit 4). */
+#define VFE_RESET_UPON_STOP_CMD  0x000003ef
+
+/* reset the pipeline when a reset command is issued.
+ * Bits 26-31 = 0 for domain reset, bits 0-8 = 1 for module reset. */
+#define VFE_RESET_UPON_RESET_CMD  0x000001ff
+
+/* constants for irq registers */
+#define VFE_DISABLE_ALL_IRQS 0
+/* bit =1 is to clear the corresponding bit in VFE_IRQ_STATUS.  */
+#define VFE_CLEAR_ALL_IRQ0   0xffff7fff
+#define VFE_CLEAR_ALL_IRQ1   0xffffffff
+
+#define VFE_IRQ_STATUS0_CAMIF_SOF_MASK            (0x00000001<<0)
+#define VFE_IRQ_STATUS0_REG_UPDATE_MASK           (0x00000001<<4)
+#define VFE_IRQ_STATUS0_STATS_BE                  (0x00000001<<16)
+#define VFE_IRQ_STATUS0_STATS_BG                  (0x00000001<<17)
+#define VFE_IRQ_STATUS0_STATS_BF                  (0x00000001<<18)
+#define VFE_IRQ_STATUS0_STATS_AWB                 (0x00000001<<19)
+#define VFE_IRQ_STATUS0_STATS_RS                  (0x00000001<<20)
+#define VFE_IRQ_STATUS0_STATS_CS                  (0x00000001<<21)
+#define VFE_IRQ_STATUS0_STATS_IHIST               (0x00000001<<22)
+#define VFE_IRQ_STATUS0_STATS_SKIN_BHIST          (0x00000001<<23)
+#define VFE_IRQ_STATUS0_IMAGE_COMPOSIT_DONE0_MASK (0x00000001<<25)
+#define VFE_IRQ_STATUS0_IMAGE_COMPOSIT_DONE1_MASK (0x00000001<<26)
+#define VFE_IRQ_STATUS0_IMAGE_COMPOSIT_DONE2_MASK (0x00000001<<27)
+#define VFE_IRQ_STATUS0_IMAGE_COMPOSIT_DONE3_MASK (0x00000001<<28)
+#define VFE_IRQ_STATUS0_STATS_COMPOSIT_MASK_0     (0x00000001<<29)
+#define VFE_IRQ_STATUS0_STATS_COMPOSIT_MASK_1     (0x00000001<<30)
+#define VFE_IRQ_STATUS0_RESET_AXI_HALT_ACK_MASK   (0x00000001<<31)
+
+#define VFE_IRQ_STATUS1_SYNC_TIMER0               (0x00000001<<25)
+#define VFE_IRQ_STATUS1_SYNC_TIMER1               (0x00000001<<26)
+#define VFE_IRQ_STATUS1_SYNC_TIMER2               (0x00000001<<27)
+#define VFE_IRQ_STATUS1_ASYNC_TIMER0              (0x00000001<<28)
+#define VFE_IRQ_STATUS1_ASYNC_TIMER1              (0x00000001<<29)
+#define VFE_IRQ_STATUS1_ASYNC_TIMER2              (0x00000001<<30)
+#define VFE_IRQ_STATUS1_ASYNC_TIMER3              (0x00000001<<31)
+
+/* imask for while waiting for stop ack,  driver has already
+ * requested stop, waiting for reset irq, and async timer irq.
+ * For irq_status_0, bit 28-32 are for async timer. For
+ * irq_status_1, bit 22 for reset irq, bit 23 for axi_halt_ack
+   irq */
+#define VFE_IMASK_WHILE_STOPPING_0  0x80000000
+#define VFE_IMASK_WHILE_STOPPING_1  0x00000100
+
+/* For ABF, bit 4 is set to zero and the others to 1 */
+#define ABF_MASK 0xFFFFFFF7
+
+/* For DBPC, bit 0 is set to zero and the others to 1 */
+#define DBPC_MASK 0xFFFFFFFE
+
+/* For DBCC, bit 1 is set to zero and the others to 1 */
+#define DBCC_MASK 0xFFFFFFFD
+
+/* For DBPC/ABF/DBCC/ABCC, those bits are set to 1 and all others to 0 */
+#define DEMOSAIC_MASK 0xF
+
+/* For MCE enable, bit 28 is set to zero and the others to 1 */
+#define MCE_EN_MASK 0xEFFFFFFF
+
+/* For MCE Q_K, bits 28 to 31 are set to zero and the others to 1 */
+#define MCE_Q_K_MASK 0x0FFFFFFF
+
+#define BE_ENABLE_MASK    (0x00000001<<5)
+#define BG_ENABLE_MASK    (0x00000001<<6)
+#define BF_ENABLE_MASK    (0x00000001<<7)
+#define AWB_ENABLE_MASK   (0x00000001<<8)
+#define RS_ENABLE_MASK    (0x00000001<<9)
+#define CS_ENABLE_MASK    (0x00000001<<10)
+#define CLF_ENABLE_MASK   (0x00000001<<12)
+#define IHIST_ENABLE_MASK (0x00000001<<15)
+#define RS_CS_ENABLE_MASK (RS_ENABLE_MASK|CS_ENABLE_MASK)
+#define STATS_ENABLE_MASK 0x000487E0   /* bit 18,15,10,9,8,7,6,5*/
+
+#define VFE_DMI_CFG_DEFAULT              0x00000100
+
+#define HFR_MODE_OFF 1
+#define VFE_FRAME_SKIP_PERIOD_MASK 0x0000001F /*bits 0 -4*/
+
+enum VFE40_DMI_RAM_SEL {
+	NO_MEM_SELECTED          = 0,
+	BLACK_LUT_RAM_BANK0      = 0x1,
+	BLACK_LUT_RAM_BANK1      = 0x2,
+	ROLLOFF_RAM0_BANK0       = 0x3,
+	ROLLOFF_RAM0_BANK1       = 0x4,
+	DEMOSAIC_LUT_RAM_BANK0   = 0x5,
+	DEMOSAIC_LUT_RAM_BANK1   = 0x6,
+	STATS_BHIST_RAM0         = 0x7,
+	STATS_BHIST_RAM1         = 0x8,
+	RGBLUT_RAM_CH0_BANK0     = 0x9,
+	RGBLUT_RAM_CH0_BANK1     = 0xa,
+	RGBLUT_RAM_CH1_BANK0     = 0xb,
+	RGBLUT_RAM_CH1_BANK1     = 0xc,
+	RGBLUT_RAM_CH2_BANK0     = 0xd,
+	RGBLUT_RAM_CH2_BANK1     = 0xe,
+	RGBLUT_CHX_BANK0         = 0xf,
+	RGBLUT_CHX_BANK1         = 0x10,
+	STATS_IHIST_RAM          = 0x11,
+	LUMA_ADAPT_LUT_RAM_BANK0 = 0x12,
+	LUMA_ADAPT_LUT_RAM_BANK1 = 0x13,
+};
+
+enum vfe_output_state {
+	VFE_STATE_IDLE,
+	VFE_STATE_START_REQUESTED,
+	VFE_STATE_STARTED,
+	VFE_STATE_STOP_REQUESTED,
+	VFE_STATE_STOPPED,
+};
+
+#define V40_CAMIF_OFF             0x000002F8
+#define V40_CAMIF_LEN             36
+
+#define V40_DEMUX_OFF             0x00000424
+#define V40_DEMUX_LEN             28
+
+#define V40_DEMOSAICV3_0_OFF      0x00000440
+#define V40_DEMOSAICV3_0_LEN      4
+#define V40_DEMOSAICV3_1_OFF      0x00000518
+#define V40_DEMOSAICV3_1_LEN      88
+#define V40_DEMOSAICV3_2_OFF      0x00000568
+#define V40_DEMOSAICV3_UP_REG_CNT 5
+
+#define V40_OUT_CLAMP_OFF         0x00000874
+#define V40_OUT_CLAMP_LEN         16
+
+#define V40_OPERATION_CFG_LEN     44
+
+#define V40_AXI_OUT_OFF           0x0000004C
+#define V40_AXI_OUT_LEN           412
+#define V40_AXI_CH_INF_LEN        32
+#define V40_AXI_CFG_LEN           71
+
+#define V40_FOV_ENC_OFF           0x00000854
+#define V40_FOV_ENC_LEN           16
+#define V40_FOV_VIEW_OFF          0x00000864
+#define V40_FOV_VIEW_LEN          16
+
+#define V40_SCALER_ENC_OFF 0x0000075C
+#define V40_SCALER_ENC_LEN 72
+
+#define V40_SCALER_VIEW_OFF 0x000007A4
+#define V40_SCALER_VIEW_LEN 72
+
+#define V40_COLORXFORM_ENC_CFG_OFF 0x0000071C
+#define V40_COLORXFORM_ENC_CFG_LEN 32
+
+#define V40_COLORXFORM_VIEW_CFG_OFF 0x0000073C
+#define V40_COLORXFORM_VIEW_CFG_LEN 32
+
+#define V40_CHROMA_EN_OFF 0x00000640
+#define V40_CHROMA_EN_LEN 36
+
+#define V40_SYNC_TIMER_OFF      0x00000324
+#define V40_SYNC_TIMER_POLARITY_OFF 0x0000034C
+#define V40_TIMER_SELECT_OFF        0x00000374
+#define V40_SYNC_TIMER_LEN 28
+
+#define V40_ASYNC_TIMER_OFF 0x00000350
+#define V40_ASYNC_TIMER_LEN 28
+
+/* use 10x13 mesh table in vfe40*/
+#define V40_MESH_ROLL_OFF_CFG_OFF             0x00000400
+#define V40_MESH_ROLL_OFF_CFG_LEN             36
+#define V40_MESH_ROLL_OFF_TABLE_SIZE          130
+
+
+#define V40_COLOR_COR_OFF 0x000005D0
+#define V40_COLOR_COR_LEN 52
+
+#define V40_WB_OFF 0x00000580
+#define V40_WB_LEN 4
+
+#define V40_RGB_G_OFF 0x00000638
+#define V40_RGB_G_LEN 4
+#define V40_GAMMA_LUT_BANK_SEL_MASK           0x00000007
+
+#define V40_LA_OFF 0x0000063C
+#define V40_LA_LEN 4
+
+#define V40_SCE_OFF 0x00000694
+#define V40_SCE_LEN 136
+
+#define V40_CHROMA_SUP_OFF 0x00000664
+#define V40_CHROMA_SUP_LEN 12
+
+#define V40_MCE_OFF 0x00000670
+#define V40_MCE_LEN 36
+
+#define V40_STATS_BE_OFF 0x0000088C
+#define V40_STATS_BE_LEN 12
+
+#define V40_STATS_BG_OFF 0x00000898
+#define V40_STATS_BG_LEN 12
+
+#define V40_STATS_BF_OFF 0x000008A4
+#define V40_STATS_BF_LEN 24
+
+#define V40_STATS_BHIST_OFF 0x000008BC
+#define V40_STATS_BHIST_LEN 8
+
+#define V40_STATS_AWB_OFF 0x000008C4
+#define V40_STATS_AWB_LEN 32
+
+#define V40_STATS_RS_OFF 0x000008E4
+#define V40_STATS_RS_LEN 8
+
+#define V40_STATS_CS_OFF 0x000008EC
+#define V40_STATS_CS_LEN 8
+
+#define V40_STATS_IHIST_OFF 0x000008F4
+#define V40_STATS_IHIST_LEN 8
+
+#define V40_STATS_SKIN_OFF 0x000008FC
+#define V40_STATS_SKIN_LEN 20
+
+#define V40_ASF_OFF 0x000007EC
+#define V40_ASF_LEN 48
+#define V40_ASF_UPDATE_LEN 36
+
+#define V40_CAPTURE_LEN 4
+
+#define V40_GET_HW_VERSION_OFF 0
+#define V40_GET_HW_VERSION_LEN 4
+
+#define V40_LINEARIZATION_OFF1 0x0000037C
+#define V40_LINEARIZATION_LEN1 68
+
+#define V40_DEMOSAICV3_DBPC_CFG_OFF  0x00000444
+#define V40_DEMOSAICV3_DBPC_LEN 4
+
+#define V40_DEMOSAICV3_DBPC_CFG_OFF0 0x00000448
+#define V40_DEMOSAICV3_DBPC_CFG_OFF1 0x0000044C
+#define V40_DEMOSAICV3_DBPC_CFG_OFF2 0x00000450
+
+#define V40_DEMOSAICV3_DBCC_OFF 0x00000454
+#define V40_DEMOSAICV3_DBCC_LEN 16
+
+#define V40_DEMOSAICV3_ABF_OFF 0x00000464
+#define V40_DEMOSAICV3_ABF_LEN 180
+
+#define V40_MODULE_CFG_OFF 0x00000018
+#define V40_MODULE_CFG_LEN 4
+
+#define V40_ASF_SPECIAL_EFX_CFG_OFF 0x0000081C
+#define V40_ASF_SPECIAL_EFX_CFG_LEN 4
+
+#define V40_CLF_CFG_OFF 0x00000588
+#define V40_CLF_CFG_LEN 72
+
+#define V40_CLF_LUMA_UPDATE_OFF 0x0000058C
+#define V40_CLF_LUMA_UPDATE_LEN 60
+
+#define V40_CLF_CHROMA_UPDATE_OFF 0x000005C8
+#define V40_CLF_CHROMA_UPDATE_LEN 8
+
+#define VFE40_GAMMA_NUM_ENTRIES  64
+
+#define VFE40_LA_TABLE_LENGTH    64
+
+#define VFE40_LINEARIZATON_TABLE_LENGTH    36
+
+#define VFE_WM_CFG_BASE 0x0070
+#define VFE_WM_CFG_LEN 0x0024
+
+#define vfe40_get_ch_ping_addr(base, chn) \
+	(msm_camera_io_r((base) + VFE_WM_CFG_BASE + VFE_WM_CFG_LEN * (chn)))
+#define vfe40_get_ch_pong_addr(base, chn) \
+	(msm_camera_io_r((base) + VFE_WM_CFG_BASE + VFE_WM_CFG_LEN * (chn) + 4))
+#define vfe40_get_ch_addr(ping_pong, base, chn) \
+	((((ping_pong) & (1 << (chn))) == 0) ? \
+	(vfe40_get_ch_pong_addr((base), chn)) : \
+	(vfe40_get_ch_ping_addr((base), chn)))
+
+#define vfe40_put_ch_ping_addr(base, chn, addr) \
+	(msm_camera_io_w((addr), \
+	(base) + VFE_WM_CFG_BASE + VFE_WM_CFG_LEN * (chn)))
+#define vfe40_put_ch_pong_addr(base, chn, addr) \
+	(msm_camera_io_w((addr), \
+	(base) + VFE_WM_CFG_BASE + VFE_WM_CFG_LEN * (chn) + 4))
+#define vfe40_put_ch_addr(ping_pong, base, chn, addr) \
+	(((ping_pong) & (1 << (chn))) == 0 ?   \
+	vfe40_put_ch_pong_addr((base), (chn), (addr)) : \
+	vfe40_put_ch_ping_addr((base), (chn), (addr)))
+
+struct vfe_cmd_hw_version {
+	uint32_t minorVersion;
+	uint32_t majorVersion;
+	uint32_t coreVersion;
+};
+
+enum VFE_AXI_OUTPUT_MODE {
+	VFE_AXI_OUTPUT_MODE_Output1,
+	VFE_AXI_OUTPUT_MODE_Output2,
+	VFE_AXI_OUTPUT_MODE_Output1AndOutput2,
+	VFE_AXI_OUTPUT_MODE_CAMIFToAXIViaOutput2,
+	VFE_AXI_OUTPUT_MODE_Output2AndCAMIFToAXIViaOutput1,
+	VFE_AXI_OUTPUT_MODE_Output1AndCAMIFToAXIViaOutput2,
+	VFE_AXI_LAST_OUTPUT_MODE_ENUM
+};
+
+enum VFE_RAW_WR_PATH_SEL {
+	VFE_RAW_OUTPUT_DISABLED,
+	VFE_RAW_OUTPUT_ENC_CBCR_PATH,
+	VFE_RAW_OUTPUT_VIEW_CBCR_PATH,
+	VFE_RAW_OUTPUT_PATH_INVALID
+};
+
+
+#define VFE_AXI_OUTPUT_BURST_LENGTH     4
+#define VFE_MAX_NUM_FRAGMENTS_PER_FRAME 4
+#define VFE_AXI_OUTPUT_CFG_FRAME_COUNT  3
+
+struct vfe_cmds_per_write_master {
+	uint16_t imageWidth;
+	uint16_t imageHeight;
+	uint16_t outRowCount;
+	uint16_t outRowIncrement;
+	uint32_t outFragments[VFE_AXI_OUTPUT_CFG_FRAME_COUNT]
+		[VFE_MAX_NUM_FRAGMENTS_PER_FRAME];
+};
+
+struct vfe_cmds_axi_per_output_path {
+	uint8_t fragmentCount;
+	struct vfe_cmds_per_write_master firstWM;
+	struct vfe_cmds_per_write_master secondWM;
+};
+
+enum VFE_AXI_BURST_LENGTH {
+	VFE_AXI_BURST_LENGTH_IS_2  = 2,
+	VFE_AXI_BURST_LENGTH_IS_4  = 4,
+	VFE_AXI_BURST_LENGTH_IS_8  = 8,
+	VFE_AXI_BURST_LENGTH_IS_16 = 16
+};
+
+
+struct vfe_cmd_fov_crop_config {
+	uint8_t enable;
+	uint16_t firstPixel;
+	uint16_t lastPixel;
+	uint16_t firstLine;
+	uint16_t lastLine;
+};
+
+struct vfe_cmds_main_scaler_stripe_init {
+	uint16_t MNCounterInit;
+	uint16_t phaseInit;
+};
+
+struct vfe_cmds_scaler_one_dimension {
+	uint8_t  enable;
+	uint16_t inputSize;
+	uint16_t outputSize;
+	uint32_t phaseMultiplicationFactor;
+	uint8_t  interpolationResolution;
+};
+
+struct vfe_cmd_main_scaler_config {
+	uint8_t enable;
+	struct vfe_cmds_scaler_one_dimension    hconfig;
+	struct vfe_cmds_scaler_one_dimension    vconfig;
+	struct vfe_cmds_main_scaler_stripe_init MNInitH;
+	struct vfe_cmds_main_scaler_stripe_init MNInitV;
+};
+
+struct vfe_cmd_scaler2_config {
+	uint8_t enable;
+	struct vfe_cmds_scaler_one_dimension hconfig;
+	struct vfe_cmds_scaler_one_dimension vconfig;
+};
+
+
+struct vfe_cmd_frame_skip_update {
+	uint32_t output1Pattern;
+	uint32_t output2Pattern;
+};
+
+struct vfe_cmd_output_clamp_config {
+	uint8_t minCh0;
+	uint8_t minCh1;
+	uint8_t minCh2;
+	uint8_t maxCh0;
+	uint8_t maxCh1;
+	uint8_t maxCh2;
+};
+
+struct vfe_cmd_chroma_subsample_config {
+	uint8_t enable;
+	uint8_t cropEnable;
+	uint8_t vsubSampleEnable;
+	uint8_t hsubSampleEnable;
+	uint8_t vCosited;
+	uint8_t hCosited;
+	uint8_t vCositedPhase;
+	uint8_t hCositedPhase;
+	uint16_t cropWidthFirstPixel;
+	uint16_t cropWidthLastPixel;
+	uint16_t cropHeightFirstLine;
+	uint16_t cropHeightLastLine;
+};
+
+enum VFE_START_PIXEL_PATTERN {
+	VFE_BAYER_RGRGRG,
+	VFE_BAYER_GRGRGR,
+	VFE_BAYER_BGBGBG,
+	VFE_BAYER_GBGBGB,
+	VFE_YUV_YCbYCr,
+	VFE_YUV_YCrYCb,
+	VFE_YUV_CbYCrY,
+	VFE_YUV_CrYCbY
+};
+
+enum VFE_BUS_RD_INPUT_PIXEL_PATTERN {
+	VFE_BAYER_RAW,
+	VFE_YUV_INTERLEAVED,
+	VFE_YUV_PSEUDO_PLANAR_Y,
+	VFE_YUV_PSEUDO_PLANAR_CBCR
+};
+
+enum VFE_YUV_INPUT_COSITING_MODE {
+	VFE_YUV_COSITED,
+	VFE_YUV_INTERPOLATED
+};
+
+struct vfe_cmds_demosaic_abf {
+	uint8_t   enable;
+	uint8_t   forceOn;
+	uint8_t   shift;
+	uint16_t  lpThreshold;
+	uint16_t  max;
+	uint16_t  min;
+	uint8_t   ratio;
+};
+
+struct vfe_cmds_demosaic_bpc {
+	uint8_t   enable;
+	uint16_t  fmaxThreshold;
+	uint16_t  fminThreshold;
+	uint16_t  redDiffThreshold;
+	uint16_t  blueDiffThreshold;
+	uint16_t  greenDiffThreshold;
+};
+
+struct vfe_cmd_demosaic_config {
+	uint8_t   enable;
+	uint8_t   slopeShift;
+	struct vfe_cmds_demosaic_abf abfConfig;
+	struct vfe_cmds_demosaic_bpc bpcConfig;
+};
+
+struct vfe_cmd_demosaic_bpc_update {
+	struct vfe_cmds_demosaic_bpc bpcUpdate;
+};
+
+struct vfe_cmd_demosaic_abf_update {
+	struct vfe_cmds_demosaic_abf abfUpdate;
+};
+
+struct vfe_cmd_white_balance_config {
+	uint8_t  enable;
+	uint16_t ch2Gain;
+	uint16_t ch1Gain;
+	uint16_t ch0Gain;
+};
+
+enum VFE_COLOR_CORRECTION_COEF_QFACTOR {
+	COEF_IS_Q7_SIGNED,
+	COEF_IS_Q8_SIGNED,
+	COEF_IS_Q9_SIGNED,
+	COEF_IS_Q10_SIGNED
+};
+
+struct vfe_cmd_color_correction_config {
+	uint8_t     enable;
+	enum VFE_COLOR_CORRECTION_COEF_QFACTOR coefQFactor;
+	int16_t  C0;
+	int16_t  C1;
+	int16_t  C2;
+	int16_t  C3;
+	int16_t  C4;
+	int16_t  C5;
+	int16_t  C6;
+	int16_t  C7;
+	int16_t  C8;
+	int16_t  K0;
+	int16_t  K1;
+	int16_t  K2;
+};
+
+#define VFE_LA_TABLE_LENGTH 64
+
+struct vfe_cmd_la_config {
+	uint8_t enable;
+	int16_t table[VFE_LA_TABLE_LENGTH];
+};
+
+#define VFE_GAMMA_TABLE_LENGTH 256
+enum VFE_RGB_GAMMA_TABLE_SELECT {
+	RGB_GAMMA_CH0_SELECTED,
+	RGB_GAMMA_CH1_SELECTED,
+	RGB_GAMMA_CH2_SELECTED,
+	RGB_GAMMA_CH0_CH1_SELECTED,
+	RGB_GAMMA_CH0_CH2_SELECTED,
+	RGB_GAMMA_CH1_CH2_SELECTED,
+	RGB_GAMMA_CH0_CH1_CH2_SELECTED
+};
+
+struct vfe_cmd_rgb_gamma_config {
+	uint8_t enable;
+	enum VFE_RGB_GAMMA_TABLE_SELECT channelSelect;
+	int16_t table[VFE_GAMMA_TABLE_LENGTH];
+};
+
+struct vfe_cmd_chroma_enhan_config {
+	uint8_t  enable;
+	int16_t am;
+	int16_t ap;
+	int16_t bm;
+	int16_t bp;
+	int16_t cm;
+	int16_t cp;
+	int16_t dm;
+	int16_t dp;
+	int16_t kcr;
+	int16_t kcb;
+	int16_t RGBtoYConversionV0;
+	int16_t RGBtoYConversionV1;
+	int16_t RGBtoYConversionV2;
+	uint8_t RGBtoYConversionOffset;
+};
+
+struct vfe_cmd_chroma_suppression_config {
+	uint8_t enable;
+	uint8_t m1;
+	uint8_t m3;
+	uint8_t n1;
+	uint8_t n3;
+	uint8_t nn1;
+	uint8_t mm1;
+};
+
+struct vfe_cmd_asf_config {
+	uint8_t enable;
+	uint8_t smoothFilterEnabled;
+	uint8_t sharpMode;
+	uint8_t smoothCoefCenter;
+	uint8_t smoothCoefSurr;
+	uint8_t normalizeFactor;
+	uint8_t sharpK1;
+	uint8_t sharpK2;
+	uint8_t sharpThreshE1;
+	int8_t sharpThreshE2;
+	int8_t sharpThreshE3;
+	int8_t sharpThreshE4;
+	int8_t sharpThreshE5;
+	int8_t filter1Coefficients[9];
+	int8_t filter2Coefficients[9];
+	uint8_t  cropEnable;
+	uint16_t cropFirstPixel;
+	uint16_t cropLastPixel;
+	uint16_t cropFirstLine;
+	uint16_t cropLastLine;
+};
+
+struct vfe_cmd_asf_update {
+	uint8_t enable;
+	uint8_t smoothFilterEnabled;
+	uint8_t sharpMode;
+	uint8_t smoothCoefCenter;
+	uint8_t smoothCoefSurr;
+	uint8_t normalizeFactor;
+	uint8_t sharpK1;
+	uint8_t sharpK2;
+	uint8_t sharpThreshE1;
+	int8_t  sharpThreshE2;
+	int8_t  sharpThreshE3;
+	int8_t  sharpThreshE4;
+	int8_t  sharpThreshE5;
+	int8_t  filter1Coefficients[9];
+	int8_t  filter2Coefficients[9];
+	uint8_t cropEnable;
+};
+
+enum VFE_TEST_GEN_SYNC_EDGE {
+	VFE_TEST_GEN_SYNC_EDGE_ActiveHigh,
+	VFE_TEST_GEN_SYNC_EDGE_ActiveLow
+};
+
+
+struct vfe_cmd_bus_pm_start {
+	uint8_t output2YWrPmEnable;
+	uint8_t output2CbcrWrPmEnable;
+	uint8_t output1YWrPmEnable;
+	uint8_t output1CbcrWrPmEnable;
+};
+
+struct  vfe_frame_skip_counts {
+	uint32_t  totalFrameCount;
+	uint32_t  output1Count;
+	uint32_t  output2Count;
+};
+
+enum VFE_AXI_RD_UNPACK_HBI_SEL {
+	VFE_AXI_RD_HBI_32_CLOCK_CYCLES,
+	VFE_AXI_RD_HBI_64_CLOCK_CYCLES,
+	VFE_AXI_RD_HBI_128_CLOCK_CYCLES,
+	VFE_AXI_RD_HBI_256_CLOCK_CYCLES,
+	VFE_AXI_RD_HBI_512_CLOCK_CYCLES,
+	VFE_AXI_RD_HBI_1024_CLOCK_CYCLES,
+	VFE_AXI_RD_HBI_2048_CLOCK_CYCLES,
+	VFE_AXI_RD_HBI_4096_CLOCK_CYCLES
+};
+
+struct vfe_frame_bpc_info {
+	uint32_t greenDefectPixelCount;
+	uint32_t redBlueDefectPixelCount;
+};
+
+struct vfe_frame_asf_info {
+	uint32_t  asfMaxEdge;
+	uint32_t  asfHbiCount;
+};
+
+struct vfe_msg_camif_status {
+	uint8_t  camifState;
+	uint32_t pixelCount;
+	uint32_t lineCount;
+};
+
+struct vfe40_irq_status {
+	uint32_t vfeIrqStatus0;
+	uint32_t vfeIrqStatus1;
+	uint32_t camifStatus;
+	uint32_t demosaicStatus;
+	uint32_t asfMaxEdge;
+};
+
+#define V40_PREVIEW_AXI_FLAG  0x00000001
+#define V40_SNAPSHOT_AXI_FLAG (0x00000001<<1)
+
+struct vfe40_cmd_type {
+	uint16_t id;
+	uint32_t length;
+	uint32_t offset;
+	uint32_t flag;
+};
+
+struct vfe40_free_buf {
+	struct list_head node;
+	uint32_t paddr;
+	uint32_t y_off;
+	uint32_t cbcr_off;
+};
+
+struct vfe40_output_ch {
+	struct list_head free_buf_queue;
+	spinlock_t free_buf_lock;
+	uint16_t image_mode;
+	int8_t ch0;
+	int8_t ch1;
+	int8_t ch2;
+	uint32_t  capture_cnt;
+	uint32_t  frame_drop_cnt;
+	struct msm_free_buf ping;
+	struct msm_free_buf pong;
+	struct msm_free_buf free_buf;
+};
+
+/* no error irq in mask 0 */
+#define VFE40_IMASK_ERROR_ONLY_0  0x0
+/* In the normal case we do not want to mask out error status. */
+/* bits 0-21 are error irq bits */
+#define VFE40_IMASK_ERROR_ONLY_1               0x005FFFFF
+#define VFE40_IMASK_CAMIF_ERROR               (0x00000001<<0)
+#define VFE40_IMASK_BHIST_OVWR                (0x00000001<<1)
+#define VFE40_IMASK_STATS_CS_OVWR             (0x00000001<<2)
+#define VFE40_IMASK_STATS_IHIST_OVWR          (0x00000001<<3)
+#define VFE40_IMASK_REALIGN_BUF_Y_OVFL        (0x00000001<<4)
+#define VFE40_IMASK_REALIGN_BUF_CB_OVFL       (0x00000001<<5)
+#define VFE40_IMASK_REALIGN_BUF_CR_OVFL       (0x00000001<<6)
+#define VFE40_IMASK_VIOLATION                 (0x00000001<<7)
+#define VFE40_IMASK_IMG_MAST_0_BUS_OVFL       (0x00000001<<8)
+#define VFE40_IMASK_IMG_MAST_1_BUS_OVFL       (0x00000001<<9)
+#define VFE40_IMASK_IMG_MAST_2_BUS_OVFL       (0x00000001<<10)
+#define VFE40_IMASK_IMG_MAST_3_BUS_OVFL       (0x00000001<<11)
+#define VFE40_IMASK_IMG_MAST_4_BUS_OVFL       (0x00000001<<12)
+#define VFE40_IMASK_IMG_MAST_5_BUS_OVFL       (0x00000001<<13)
+#define VFE40_IMASK_IMG_MAST_6_BUS_OVFL       (0x00000001<<14)
+#define VFE40_IMASK_STATS_AE_BG_BUS_OVFL      (0x00000001<<15)
+#define VFE40_IMASK_STATS_AF_BF_BUS_OVFL      (0x00000001<<16)
+#define VFE40_IMASK_STATS_AWB_BUS_OVFL        (0x00000001<<17)
+#define VFE40_IMASK_STATS_RS_BUS_OVFL         (0x00000001<<18)
+#define VFE40_IMASK_STATS_CS_BUS_OVFL         (0x00000001<<19)
+#define VFE40_IMASK_STATS_IHIST_BUS_OVFL      (0x00000001<<20)
+#define VFE40_IMASK_STATS_SKIN_BHIST_BUS_OVFL (0x00000001<<21)
+#define VFE40_IMASK_AXI_ERROR                 (0x00000001<<22)
+
+#define VFE_COM_STATUS 0x000FE000
+
+struct vfe40_output_path {
+	uint16_t output_mode;     /* bitmask  */
+
+	struct vfe40_output_ch out0; /* preview and thumbnail */
+	struct vfe40_output_ch out1; /* snapshot */
+	struct vfe40_output_ch out2; /* video    */
+};
+
+struct vfe40_frame_extra {
+	uint32_t greenDefectPixelCount;
+	uint32_t redBlueDefectPixelCount;
+
+	uint32_t  asfMaxEdge;
+	uint32_t  asfHbiCount;
+
+	uint32_t yWrPmStats0;
+	uint32_t yWrPmStats1;
+	uint32_t cbcrWrPmStats0;
+	uint32_t cbcrWrPmStats1;
+
+	uint32_t  frameCounter;
+};
+
+#define VFE_CLEAR_ALL_IRQS              0xffffffff
+
+#define VFE_HW_VERSION			        0x00000000
+#define VFE_GLOBAL_RESET                0x0000000C
+#define VFE_MODULE_RESET                0x00000010
+#define VFE_CGC_OVERRIDE                0x00000014
+#define VFE_MODULE_CFG                  0x00000018
+#define VFE_CFG				            0x0000001C
+#define VFE_IRQ_CMD                     0x00000024
+#define VFE_IRQ_MASK_0                  0x00000028
+#define VFE_IRQ_MASK_1                  0x0000002C
+#define VFE_IRQ_CLEAR_0                 0x00000030
+#define VFE_IRQ_CLEAR_1                 0x00000034
+#define VFE_IRQ_STATUS_0                0x00000038
+#define VFE_IRQ_STATUS_1                0x0000003C
+#define VFE_IRQ_COMP_MASK               0x00000040
+#define VFE_BUS_CMD                     0x0000004C
+#define VFE_BUS_PING_PONG_STATUS        0x00000180
+#define VFE_AXI_CMD                     0x000001D8
+#define VFE_AXI_STATUS        0x000002C0
+#define VFE_BUS_STATS_PING_PONG_BASE    0x000000F4
+
+#define VFE_BUS_STATS_AEC_WR_PING_ADDR    0x000000F4
+#define VFE_BUS_STATS_AEC_WR_PONG_ADDR    0x000000F8
+#define VFE_BUS_STATS_AEC_UB_CFG          0x000000FC
+#define VFE_BUS_STATS_AF_WR_PING_ADDR     0x00000100
+#define VFE_BUS_STATS_AF_WR_PONG_ADDR     0x00000104
+#define VFE_BUS_STATS_AF_UB_CFG           0x00000108
+#define VFE_BUS_STATS_AWB_WR_PING_ADDR    0x0000010C
+#define VFE_BUS_STATS_AWB_WR_PONG_ADDR    0x00000110
+#define VFE_BUS_STATS_AWB_UB_CFG          0x00000114
+#define VFE_BUS_STATS_RS_WR_PING_ADDR    0x00000118
+#define VFE_BUS_STATS_RS_WR_PONG_ADDR    0x0000011C
+#define VFE_BUS_STATS_RS_UB_CFG          0x00000120
+#define VFE_BUS_STATS_CS_WR_PING_ADDR    0x00000124
+#define VFE_BUS_STATS_CS_WR_PONG_ADDR    0x00000128
+#define VFE_BUS_STATS_CS_UB_CFG          0x0000012C
+#define VFE_BUS_STATS_HIST_WR_PING_ADDR   0x00000130
+#define VFE_BUS_STATS_HIST_WR_PONG_ADDR   0x00000134
+#define VFE_BUS_STATS_HIST_UB_CFG          0x00000138
+#define VFE_BUS_STATS_SKIN_WR_PING_ADDR    0x0000013C
+#define VFE_BUS_STATS_SKIN_WR_PONG_ADDR    0x00000140
+#define VFE_BUS_STATS_SKIN_UB_CFG          0x00000144
+
+#define VFE_0_BUS_BDG_QOS_CFG_0     0x000002C4
+#define VFE_0_BUS_BDG_QOS_CFG_1     0x000002C8
+#define VFE_0_BUS_BDG_QOS_CFG_2     0x000002CC
+#define VFE_0_BUS_BDG_QOS_CFG_3     0x000002D0
+#define VFE_0_BUS_BDG_QOS_CFG_4     0x000002D4
+#define VFE_0_BUS_BDG_QOS_CFG_5     0x000002D8
+#define VFE_0_BUS_BDG_QOS_CFG_6     0x000002DC
+#define VFE_0_BUS_BDG_QOS_CFG_7     0x000002E0
+
+#define VFE_CAMIF_COMMAND               0x000002F4
+#define VFE_CAMIF_STATUS                0x0000031C
+#define VFE_REG_UPDATE_CMD              0x00000378
+#define VFE_DEMUX_GAIN_0                0x00000428
+#define VFE_DEMUX_GAIN_1                0x0000042C
+#define VFE_CHROMA_UP                   0x0000057C
+
+#define VFE_CLAMP_ENC_MAX               0x00000874
+#define VFE_CLAMP_ENC_MIN               0x00000878
+#define VFE_CLAMP_VIEW_MAX              0x0000087C
+#define VFE_CLAMP_VIEW_MIN              0x00000880
+
+#define VFE_REALIGN_BUF                 0x00000884
+#define VFE_STATS_CFG                   0x00000888
+#define VFE_STATS_AWB_SGW_CFG           0x000008CC
+#define VFE_DMI_CFG                     0x00000910
+#define VFE_DMI_ADDR                    0x00000914
+#define VFE_DMI_DATA_LO                 0x0000091C
+#define VFE_BUS_IO_FORMAT_CFG           0x00000054
+#define VFE_RDI0_CFG                    0x000002E8
+#define VFE_RDI1_CFG                    0x000002EC
+#define VFE_RDI2_CFG                    0x000002F0
+
+#define VFE_VIOLATION_STATUS            0x000007B4
+
+#define VFE40_DMI_DATA_HI               0x00000918
+#define VFE40_DMI_DATA_LO               0x0000091C
+
+#define VFE40_OUTPUT_MODE_PT			BIT(0)
+#define VFE40_OUTPUT_MODE_S			BIT(1)
+#define VFE40_OUTPUT_MODE_V			BIT(2)
+#define VFE40_OUTPUT_MODE_P			BIT(3)
+#define VFE40_OUTPUT_MODE_T			BIT(4)
+#define VFE40_OUTPUT_MODE_P_ALL_CHNLS		BIT(5)
+#define VFE40_OUTPUT_MODE_PRIMARY		BIT(6)
+#define VFE40_OUTPUT_MODE_PRIMARY_ALL_CHNLS	BIT(7)
+#define VFE40_OUTPUT_MODE_SECONDARY		BIT(8)
+#define VFE40_OUTPUT_MODE_SECONDARY_ALL_CHNLS	BIT(9)
+
+struct vfe_stats_control {
+	uint32_t droppedStatsFrameCount;
+	uint32_t bufToRender;
+};
+struct axi_ctrl_t;
+struct vfe40_ctrl_type;
+
+struct vfe_share_ctrl_t {
+	void __iomem *vfebase;
+	uint32_t register_total;
+
+	atomic_t vstate;
+	uint32_t vfeFrameId;
+	uint32_t stats_comp;
+	spinlock_t  stop_flag_lock;
+	int8_t stop_ack_pending;
+	enum vfe_output_state liveshot_state;
+	uint32_t vfe_capture_count;
+
+	uint16_t operation_mode;     /* streaming or snapshot */
+	struct vfe40_output_path outpath;
+
+	uint32_t ref_count;
+	spinlock_t  sd_notify_lock;
+	uint32_t vfe_clk_rate;
+
+	atomic_t irq_cnt;
+	struct axi_ctrl_t *axi_ctrl;
+	struct vfe40_ctrl_type *vfe40_ctrl;
+};
+
+struct axi_ctrl_t {
+	struct v4l2_subdev subdev;
+	struct platform_device *pdev;
+	struct resource *vfeirq;
+	spinlock_t  tasklet_lock;
+	struct list_head tasklet_q;
+
+	void *syncdata;
+
+	struct resource	*vfemem;
+	struct resource *vfeio;
+	struct regulator *fs_vfe;
+	struct clk *vfe_clk[3];
+	struct tasklet_struct vfe40_tasklet;
+	struct vfe_share_ctrl_t *share_ctrl;
+};
+
+struct vfe40_ctrl_type {
+	uint32_t vfeImaskCompositePacked;
+
+	spinlock_t  update_ack_lock;
+	spinlock_t  state_lock;
+	spinlock_t  io_lock;
+	spinlock_t  stats_bufq_lock;
+	uint32_t extlen;
+	void *extdata;
+
+	int8_t start_ack_pending;
+	int8_t reset_ack_pending;
+	int8_t update_ack_pending;
+	enum vfe_output_state recording_state;
+	int8_t update_linear;
+	int8_t update_rolloff;
+	int8_t update_la;
+	int8_t update_gamma;
+
+	struct vfe_share_ctrl_t *share_ctrl;
+
+	uint32_t sync_timer_repeat_count;
+	uint32_t sync_timer_state;
+	uint32_t sync_timer_number;
+
+	uint32_t output1Pattern;
+	uint32_t output1Period;
+	uint32_t output2Pattern;
+	uint32_t output2Period;
+	uint32_t vfeFrameSkipCount;
+	uint32_t vfeFrameSkipPeriod;
+	struct vfe_stats_control afStatsControl;
+	struct vfe_stats_control awbStatsControl;
+	struct vfe_stats_control aecStatsControl;
+	struct vfe_stats_control ihistStatsControl;
+	struct vfe_stats_control rsStatsControl;
+	struct vfe_stats_control csStatsControl;
+
+	/* v4l2 subdev */
+	struct v4l2_subdev subdev;
+	struct platform_device *pdev;
+	uint32_t hfr_mode;
+	uint32_t frame_skip_cnt;
+	uint32_t frame_skip_pattern;
+	uint32_t snapshot_frame_cnt;
+	struct msm_stats_bufq_ctrl stats_ctrl;
+	struct msm_stats_ops stats_ops;
+};
+
+#define statsAeNum      0
+#define statsAfNum      1
+#define statsAwbNum     2
+#define statsRsNum      3
+#define statsCsNum      4
+#define statsIhistNum   5
+#define statsSkinNum    6
+
+struct vfe_cmd_stats_ack {
+	uint32_t  nextStatsBuf;
+};
+
+#define VFE_STATS_BUFFER_COUNT            3
+
+struct vfe_cmd_stats_buf {
+	uint32_t statsBuf[VFE_STATS_BUFFER_COUNT];
+};
+
+void vfe40_subdev_notify(int id, int path, int image_mode,
+	struct v4l2_subdev *sd, struct vfe_share_ctrl_t *share_ctrl);
+struct vfe40_output_ch *vfe40_get_ch(
+	int path, struct vfe_share_ctrl_t *share_ctrl);
+void vfe40_send_isp_msg(struct v4l2_subdev *sd,
+	uint32_t vfeFrameId, uint32_t isp_msg_id);
+void vfe40_axi_probe(struct axi_ctrl_t *axi_ctrl);
+
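+/*
+ * AXI write master (WM0-WM6) configuration register offsets, indexed by
+ * the channel numbers parsed from the userspace AXI output configuration
+ * in vfe40_config_axi().
+ */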
+static const uint32_t vfe40_AXI_WM_CFG[] = {
+	0x0000006C,
+	0x00000090,
+	0x000000B4,
+	0x000000D8,
+	0x000000FC,
+	0x00000120,
+	0x00000144,
+};
+
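+/*
+ * Command descriptor table indexed by VFE_CMD_* id.  For commands that
+ * program a contiguous block of VFE registers, 'length' and 'offset'
+ * give the size and register offset of that block (see, for example,
+ * the VFE_CMD_AXI_OUT_CFG handling in vfe40_config_axi()).
+ */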
+static struct vfe40_cmd_type vfe40_cmd[] = {
+/*0*/
+	{VFE_CMD_DUMMY_0},
+	{VFE_CMD_SET_CLK},
+	{VFE_CMD_RESET},
+	{VFE_CMD_START},
+	{VFE_CMD_TEST_GEN_START},
+/*5*/
+	{VFE_CMD_OPERATION_CFG, V40_OPERATION_CFG_LEN},
+	{VFE_CMD_AXI_OUT_CFG, V40_AXI_OUT_LEN, V40_AXI_OUT_OFF, 0xFF},
+	{VFE_CMD_CAMIF_CFG, V40_CAMIF_LEN, V40_CAMIF_OFF, 0xFF},
+	{VFE_CMD_AXI_INPUT_CFG},
+	{VFE_CMD_BLACK_LEVEL_CFG},
+/*10*/
+	{VFE_CMD_MESH_ROLL_OFF_CFG},
+	{VFE_CMD_DEMUX_CFG, V40_DEMUX_LEN, V40_DEMUX_OFF, 0xFF},
+	{VFE_CMD_FOV_CFG},
+	{VFE_CMD_MAIN_SCALER_CFG},
+	{VFE_CMD_WB_CFG, V40_WB_LEN, V40_WB_OFF, 0xFF},
+/*15*/
+	{VFE_CMD_COLOR_COR_CFG, V40_COLOR_COR_LEN, V40_COLOR_COR_OFF, 0xFF},
+	{VFE_CMD_RGB_G_CFG, V40_RGB_G_LEN, V40_RGB_G_OFF, 0xFF},
+	{VFE_CMD_LA_CFG, V40_LA_LEN, V40_LA_OFF, 0xFF },
+	{VFE_CMD_CHROMA_EN_CFG, V40_CHROMA_EN_LEN, V40_CHROMA_EN_OFF, 0xFF},
+	{VFE_CMD_CHROMA_SUP_CFG, V40_CHROMA_SUP_LEN, V40_CHROMA_SUP_OFF, 0xFF},
+/*20*/
+	{VFE_CMD_MCE_CFG, V40_MCE_LEN, V40_MCE_OFF, 0xFF},
+	{VFE_CMD_SK_ENHAN_CFG, V40_SCE_LEN, V40_SCE_OFF, 0xFF},
+	{VFE_CMD_ASF_CFG, V40_ASF_LEN, V40_ASF_OFF, 0xFF},
+	{VFE_CMD_S2Y_CFG},
+	{VFE_CMD_S2CbCr_CFG},
+/*25*/
+	{VFE_CMD_CHROMA_SUBS_CFG},
+	{VFE_CMD_OUT_CLAMP_CFG, V40_OUT_CLAMP_LEN, V40_OUT_CLAMP_OFF, 0xFF},
+	{VFE_CMD_FRAME_SKIP_CFG},
+	{VFE_CMD_DUMMY_1},
+	{VFE_CMD_DUMMY_2},
+/*30*/
+	{VFE_CMD_DUMMY_3},
+	{VFE_CMD_UPDATE},
+	{VFE_CMD_BL_LVL_UPDATE},
+	{VFE_CMD_DEMUX_UPDATE, V40_DEMUX_LEN, V40_DEMUX_OFF, 0xFF},
+	{VFE_CMD_FOV_UPDATE},
+/*35*/
+	{VFE_CMD_MAIN_SCALER_UPDATE},
+	{VFE_CMD_WB_UPDATE, V40_WB_LEN, V40_WB_OFF, 0xFF},
+	{VFE_CMD_COLOR_COR_UPDATE, V40_COLOR_COR_LEN, V40_COLOR_COR_OFF, 0xFF},
+	{VFE_CMD_RGB_G_UPDATE, V40_RGB_G_LEN, V40_CHROMA_EN_OFF, 0xFF},
+	{VFE_CMD_LA_UPDATE, V40_LA_LEN, V40_LA_OFF, 0xFF },
+/*40*/
+	{VFE_CMD_CHROMA_EN_UPDATE, V40_CHROMA_EN_LEN, V40_CHROMA_EN_OFF, 0xFF},
+	{VFE_CMD_CHROMA_SUP_UPDATE, V40_CHROMA_SUP_LEN,
+		V40_CHROMA_SUP_OFF, 0xFF},
+	{VFE_CMD_MCE_UPDATE, V40_MCE_LEN, V40_MCE_OFF, 0xFF},
+	{VFE_CMD_SK_ENHAN_UPDATE, V40_SCE_LEN, V40_SCE_OFF, 0xFF},
+	{VFE_CMD_S2CbCr_UPDATE},
+/*45*/
+	{VFE_CMD_S2Y_UPDATE},
+	{VFE_CMD_ASF_UPDATE, V40_ASF_UPDATE_LEN, V40_ASF_OFF, 0xFF},
+	{VFE_CMD_FRAME_SKIP_UPDATE},
+	{VFE_CMD_CAMIF_FRAME_UPDATE},
+	{VFE_CMD_STATS_AF_UPDATE},
+/*50*/
+	{VFE_CMD_STATS_AE_UPDATE},
+	{VFE_CMD_STATS_AWB_UPDATE, V40_STATS_AWB_LEN, V40_STATS_AWB_OFF},
+	{VFE_CMD_STATS_RS_UPDATE, V40_STATS_RS_LEN, V40_STATS_RS_OFF},
+	{VFE_CMD_STATS_CS_UPDATE, V40_STATS_CS_LEN, V40_STATS_CS_OFF},
+	{VFE_CMD_STATS_SKIN_UPDATE},
+/*55*/
+	{VFE_CMD_STATS_IHIST_UPDATE, V40_STATS_IHIST_LEN, V40_STATS_IHIST_OFF},
+	{VFE_CMD_DUMMY_4},
+	{VFE_CMD_EPOCH1_ACK},
+	{VFE_CMD_EPOCH2_ACK},
+	{VFE_CMD_START_RECORDING},
+/*60*/
+	{VFE_CMD_STOP_RECORDING},
+	{VFE_CMD_DUMMY_5},
+	{VFE_CMD_DUMMY_6},
+	{VFE_CMD_CAPTURE, V40_CAPTURE_LEN, 0xFF},
+	{VFE_CMD_DUMMY_7},
+/*65*/
+	{VFE_CMD_STOP},
+	{VFE_CMD_GET_HW_VERSION, V40_GET_HW_VERSION_LEN,
+		V40_GET_HW_VERSION_OFF},
+	{VFE_CMD_GET_FRAME_SKIP_COUNTS},
+	{VFE_CMD_OUTPUT1_BUFFER_ENQ},
+	{VFE_CMD_OUTPUT2_BUFFER_ENQ},
+/*70*/
+	{VFE_CMD_OUTPUT3_BUFFER_ENQ},
+	{VFE_CMD_JPEG_OUT_BUF_ENQ},
+	{VFE_CMD_RAW_OUT_BUF_ENQ},
+	{VFE_CMD_RAW_IN_BUF_ENQ},
+	{VFE_CMD_STATS_AF_ENQ},
+/*75*/
+	{VFE_CMD_STATS_AE_ENQ},
+	{VFE_CMD_STATS_AWB_ENQ},
+	{VFE_CMD_STATS_RS_ENQ},
+	{VFE_CMD_STATS_CS_ENQ},
+	{VFE_CMD_STATS_SKIN_ENQ},
+/*80*/
+	{VFE_CMD_STATS_IHIST_ENQ},
+	{VFE_CMD_DUMMY_8},
+	{VFE_CMD_JPEG_ENC_CFG},
+	{VFE_CMD_DUMMY_9},
+	{VFE_CMD_STATS_AF_START},
+/*85*/
+	{VFE_CMD_STATS_AF_STOP},
+	{VFE_CMD_STATS_AE_START},
+	{VFE_CMD_STATS_AE_STOP},
+	{VFE_CMD_STATS_AWB_START, V40_STATS_AWB_LEN, V40_STATS_AWB_OFF},
+	{VFE_CMD_STATS_AWB_STOP},
+/*90*/
+	{VFE_CMD_STATS_RS_START, V40_STATS_RS_LEN, V40_STATS_RS_OFF},
+	{VFE_CMD_STATS_RS_STOP},
+	{VFE_CMD_STATS_CS_START, V40_STATS_CS_LEN, V40_STATS_CS_OFF},
+	{VFE_CMD_STATS_CS_STOP},
+	{VFE_CMD_STATS_SKIN_START},
+/*95*/
+	{VFE_CMD_STATS_SKIN_STOP},
+	{VFE_CMD_STATS_IHIST_START, V40_STATS_IHIST_LEN, V40_STATS_IHIST_OFF},
+	{VFE_CMD_STATS_IHIST_STOP},
+	{VFE_CMD_DUMMY_10},
+	{VFE_CMD_SYNC_TIMER_SETTING, V40_SYNC_TIMER_LEN, V40_SYNC_TIMER_OFF},
+/*100*/
+	{VFE_CMD_ASYNC_TIMER_SETTING, V40_ASYNC_TIMER_LEN, V40_ASYNC_TIMER_OFF},
+	{VFE_CMD_LIVESHOT},
+	{VFE_CMD_LA_SETUP},
+	{VFE_CMD_LINEARIZATION_CFG, V40_LINEARIZATION_LEN1,
+		V40_LINEARIZATION_OFF1},
+	{VFE_CMD_DEMOSAICV3},
+/*105*/
+	{VFE_CMD_DEMOSAICV3_ABCC_CFG},
+	{VFE_CMD_DEMOSAICV3_DBCC_CFG, V40_DEMOSAICV3_DBCC_LEN,
+		V40_DEMOSAICV3_DBCC_OFF},
+	{VFE_CMD_DEMOSAICV3_DBPC_CFG},
+	{VFE_CMD_DEMOSAICV3_ABF_CFG, V40_DEMOSAICV3_ABF_LEN,
+		V40_DEMOSAICV3_ABF_OFF},
+	{VFE_CMD_DEMOSAICV3_ABCC_UPDATE},
+/*110*/
+	{VFE_CMD_DEMOSAICV3_DBCC_UPDATE, V40_DEMOSAICV3_DBCC_LEN,
+		V40_DEMOSAICV3_DBCC_OFF},
+	{VFE_CMD_DEMOSAICV3_DBPC_UPDATE},
+	{VFE_CMD_XBAR_CFG},
+	{VFE_CMD_MODULE_CFG, V40_MODULE_CFG_LEN, V40_MODULE_CFG_OFF},
+	{VFE_CMD_ZSL},
+/*115*/
+	{VFE_CMD_LINEARIZATION_UPDATE, V40_LINEARIZATION_LEN1,
+		V40_LINEARIZATION_OFF1},
+	{VFE_CMD_DEMOSAICV3_ABF_UPDATE, V40_DEMOSAICV3_ABF_LEN,
+		V40_DEMOSAICV3_ABF_OFF},
+	{VFE_CMD_CLF_CFG, V40_CLF_CFG_LEN, V40_CLF_CFG_OFF},
+	{VFE_CMD_CLF_LUMA_UPDATE, V40_CLF_LUMA_UPDATE_LEN,
+		V40_CLF_LUMA_UPDATE_OFF},
+	{VFE_CMD_CLF_CHROMA_UPDATE, V40_CLF_CHROMA_UPDATE_LEN,
+		V40_CLF_CHROMA_UPDATE_OFF},
+/*120*/
+	{VFE_CMD_PCA_ROLL_OFF_CFG},
+	{VFE_CMD_PCA_ROLL_OFF_UPDATE},
+	{VFE_CMD_GET_REG_DUMP},
+	{VFE_CMD_GET_LINEARIZATON_TABLE},
+	{VFE_CMD_GET_MESH_ROLLOFF_TABLE},
+/*125*/
+	{VFE_CMD_GET_PCA_ROLLOFF_TABLE},
+	{VFE_CMD_GET_RGB_G_TABLE},
+	{VFE_CMD_GET_LA_TABLE},
+	{VFE_CMD_DEMOSAICV3_UPDATE},
+	{VFE_CMD_ACTIVE_REGION_CFG},
+/*130*/
+	{VFE_CMD_COLOR_PROCESSING_CONFIG},
+	{VFE_CMD_STATS_WB_AEC_CONFIG},
+	{VFE_CMD_STATS_WB_AEC_UPDATE},
+	{VFE_CMD_Y_GAMMA_CONFIG},
+	{VFE_CMD_SCALE_OUTPUT1_CONFIG},
+/*135*/
+	{VFE_CMD_SCALE_OUTPUT2_CONFIG},
+	{VFE_CMD_CAPTURE_RAW},
+	{VFE_CMD_STOP_LIVESHOT},
+	{VFE_CMD_RECONFIG_VFE},
+	{VFE_CMD_STATS_REQBUF},
+/*140*/
+	{VFE_CMD_STATS_ENQUEUEBUF},
+	{VFE_CMD_STATS_FLUSH_BUFQ},
+	{VFE_CMD_FOV_ENC_CFG, V40_FOV_ENC_LEN, V40_FOV_ENC_OFF, 0xFF},
+	{VFE_CMD_FOV_VIEW_CFG, V40_FOV_VIEW_LEN, V40_FOV_VIEW_OFF, 0xFF},
+	{VFE_CMD_FOV_ENC_UPDATE, V40_FOV_ENC_LEN, V40_FOV_ENC_OFF, 0xFF},
+/*145*/
+	{VFE_CMD_FOV_VIEW_UPDATE, V40_FOV_VIEW_LEN, V40_FOV_VIEW_OFF, 0xFF},
+	{VFE_CMD_SCALER_ENC_CFG, V40_SCALER_ENC_LEN, V40_SCALER_ENC_OFF, 0xFF},
+	{VFE_CMD_SCALER_VIEW_CFG, V40_SCALER_VIEW_LEN,
+		V40_SCALER_VIEW_OFF, 0xFF},
+	{VFE_CMD_SCALER_ENC_UPDATE, V40_SCALER_ENC_LEN,
+		V40_SCALER_ENC_OFF, 0xFF},
+	{VFE_CMD_SCALER_VIEW_UPDATE, V40_SCALER_VIEW_LEN,
+		V40_SCALER_VIEW_OFF, 0xFF},
+/*150*/
+	{VFE_CMD_COLORXFORM_ENC_CFG, V40_COLORXFORM_ENC_CFG_LEN,
+		V40_COLORXFORM_ENC_CFG_OFF, 0xFF},
+	{VFE_CMD_COLORXFORM_VIEW_CFG, V40_COLORXFORM_VIEW_CFG_LEN,
+		V40_COLORXFORM_VIEW_CFG_OFF},
+	{VFE_CMD_COLORXFORM_ENC_UPDATE, V40_COLORXFORM_ENC_CFG_LEN,
+		V40_COLORXFORM_ENC_CFG_OFF, 0xFF},
+	{VFE_CMD_COLORXFORM_VIEW_UPDATE, V40_COLORXFORM_VIEW_CFG_LEN,
+		V40_COLORXFORM_VIEW_CFG_OFF, 0xFF},
+};
+
+#endif /* __MSM_VFE40_H__ */
diff --git a/drivers/media/video/msm/vfe/msm_vfe40_axi.c b/drivers/media/video/msm/vfe/msm_vfe40_axi.c
new file mode 100644
index 0000000..35d5207
--- /dev/null
+++ b/drivers/media/video/msm/vfe/msm_vfe40_axi.c
@@ -0,0 +1,812 @@
+/* Copyright (c) 2012, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/uaccess.h>
+#include <linux/interrupt.h>
+#include <linux/slab.h>
+#include <linux/io.h>
+#include <linux/atomic.h>
+#include <linux/regulator/consumer.h>
+#include <linux/clk.h>
+#include <mach/irqs.h>
+#include <mach/camera.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-subdev.h>
+#include <media/msm_isp.h>
+
+#include "msm.h"
+#include "msm_vfe40.h"
+
+static int msm_axi_subdev_s_crystal_freq(struct v4l2_subdev *sd,
+						u32 freq, u32 flags)
+{
+	int rc = 0;
+	int round_rate;
+	struct axi_ctrl_t *axi_ctrl = v4l2_get_subdevdata(sd);
+
+	round_rate = clk_round_rate(axi_ctrl->vfe_clk[0], freq);
+	if (round_rate < 0) {
+		pr_err("%s: clk_round_rate failed %d\n",
+					__func__, round_rate);
+		return round_rate;
+	}
+
+	axi_ctrl->share_ctrl->vfe_clk_rate = round_rate;
+	rc = clk_set_rate(axi_ctrl->vfe_clk[0], round_rate);
+	if (rc < 0)
+		pr_err("%s: clk_set_rate failed %d\n",
+					__func__, rc);
+
+	return rc;
+}
+
+void axi_start(struct axi_ctrl_t *axi_ctrl)
+{
+	switch (axi_ctrl->share_ctrl->operation_mode) {
+	case VFE_OUTPUTS_PREVIEW:
+	case VFE_OUTPUTS_PREVIEW_AND_VIDEO:
+		if (axi_ctrl->share_ctrl->outpath.output_mode &
+			VFE40_OUTPUT_MODE_PRIMARY) {
+			msm_camera_io_w(1, axi_ctrl->share_ctrl->vfebase +
+				vfe40_AXI_WM_CFG[axi_ctrl->
+				share_ctrl->outpath.out0.ch0]);
+			msm_camera_io_w(1, axi_ctrl->share_ctrl->vfebase +
+				vfe40_AXI_WM_CFG[axi_ctrl->
+				share_ctrl->outpath.out0.ch1]);
+		} else if (axi_ctrl->share_ctrl->outpath.output_mode &
+				VFE40_OUTPUT_MODE_PRIMARY_ALL_CHNLS) {
+			msm_camera_io_w(1, axi_ctrl->share_ctrl->vfebase +
+				vfe40_AXI_WM_CFG[axi_ctrl->
+				share_ctrl->outpath.out0.ch0]);
+			msm_camera_io_w(1, axi_ctrl->share_ctrl->vfebase +
+				vfe40_AXI_WM_CFG[axi_ctrl->
+				share_ctrl->outpath.out0.ch1]);
+			msm_camera_io_w(1, axi_ctrl->share_ctrl->vfebase +
+				vfe40_AXI_WM_CFG[axi_ctrl->
+				share_ctrl->outpath.out0.ch2]);
+		}
+		break;
+	default:
+		if (axi_ctrl->share_ctrl->outpath.output_mode &
+			VFE40_OUTPUT_MODE_SECONDARY) {
+			msm_camera_io_w(1, axi_ctrl->share_ctrl->vfebase +
+				vfe40_AXI_WM_CFG[axi_ctrl->
+				share_ctrl->outpath.out1.ch0]);
+			msm_camera_io_w(1, axi_ctrl->share_ctrl->vfebase +
+				vfe40_AXI_WM_CFG[axi_ctrl->
+				share_ctrl->outpath.out1.ch1]);
+		} else if (axi_ctrl->share_ctrl->outpath.output_mode &
+			VFE40_OUTPUT_MODE_SECONDARY_ALL_CHNLS) {
+			msm_camera_io_w(1, axi_ctrl->share_ctrl->vfebase +
+				vfe40_AXI_WM_CFG[axi_ctrl->
+				share_ctrl->outpath.out1.ch0]);
+			msm_camera_io_w(1, axi_ctrl->share_ctrl->vfebase +
+				vfe40_AXI_WM_CFG[axi_ctrl->
+				share_ctrl->outpath.out1.ch1]);
+			msm_camera_io_w(1, axi_ctrl->share_ctrl->vfebase +
+				vfe40_AXI_WM_CFG[axi_ctrl->
+				share_ctrl->outpath.out1.ch2]);
+		}
+		break;
+	}
+}
+
+void axi_stop(struct axi_ctrl_t *axi_ctrl)
+{
+	uint8_t  axiBusyFlag = true;
+	/* axi halt command. */
+	msm_camera_io_w(AXI_HALT,
+		axi_ctrl->share_ctrl->vfebase + VFE_AXI_CMD);
+	wmb();
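+	/* Poll the AXI status register until the halt request is
+	acknowledged (bit 0 set); note there is no timeout on this
+	busy-wait. */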
+	while (axiBusyFlag) {
+		if (msm_camera_io_r(
+			axi_ctrl->share_ctrl->vfebase + VFE_AXI_STATUS) & 0x1)
+			axiBusyFlag = false;
+	}
+	/* Ensure the write order while writing
+	to the command register using the barrier */
+	msm_camera_io_w_mb(AXI_HALT_CLEAR,
+		axi_ctrl->share_ctrl->vfebase + VFE_AXI_CMD);
+
+	/* After the AXI halt it is safe to apply a global reset. */
+	/* Enable only the reset_ack and async timer interrupts while
+	stopping the pipeline. */
+	msm_camera_io_w(0xf0000000,
+		axi_ctrl->share_ctrl->vfebase + VFE_IRQ_MASK_0);
+	msm_camera_io_w(VFE_IMASK_WHILE_STOPPING_1,
+		axi_ctrl->share_ctrl->vfebase + VFE_IRQ_MASK_1);
+
+	/* Ensure the write order while writing
+	to the command register using the barrier */
+	msm_camera_io_w_mb(VFE_RESET_UPON_STOP_CMD,
+		axi_ctrl->share_ctrl->vfebase + VFE_GLOBAL_RESET);
+}
+
+static int vfe40_config_axi(
+	struct axi_ctrl_t *axi_ctrl, int mode, uint32_t *ao)
+{
+	uint32_t *ch_info;
+	uint32_t *axi_cfg = ao;
+
+	/* Update the corresponding write masters for each output*/
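+	/* Each 32-bit word of ch_info packs two 16-bit fields: the low
+	half word carries a write master channel index and the high half
+	word the next channel index or image mode, hence the >> 16. */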
+	ch_info = axi_cfg + V40_AXI_CFG_LEN;
+	axi_ctrl->share_ctrl->outpath.out0.ch0 = 0x0000FFFF & *ch_info;
+	axi_ctrl->share_ctrl->outpath.out0.ch1 =
+		0x0000FFFF & (*ch_info++ >> 16);
+	axi_ctrl->share_ctrl->outpath.out0.ch2 = 0x0000FFFF & *ch_info;
+	axi_ctrl->share_ctrl->outpath.out0.image_mode =
+		0x0000FFFF & (*ch_info++ >> 16);
+	axi_ctrl->share_ctrl->outpath.out1.ch0 = 0x0000FFFF & *ch_info;
+	axi_ctrl->share_ctrl->outpath.out1.ch1 =
+		0x0000FFFF & (*ch_info++ >> 16);
+	axi_ctrl->share_ctrl->outpath.out1.ch2 = 0x0000FFFF & *ch_info;
+	axi_ctrl->share_ctrl->outpath.out1.image_mode =
+		0x0000FFFF & (*ch_info++ >> 16);
+	axi_ctrl->share_ctrl->outpath.out2.ch0 = 0x0000FFFF & *ch_info;
+	axi_ctrl->share_ctrl->outpath.out2.ch1 =
+		0x0000FFFF & (*ch_info++ >> 16);
+	axi_ctrl->share_ctrl->outpath.out2.ch2 = 0x0000FFFF & *ch_info++;
+
+	switch (mode) {
+	case OUTPUT_PRIM:
+		axi_ctrl->share_ctrl->outpath.output_mode =
+			VFE40_OUTPUT_MODE_PRIMARY;
+		break;
+	case OUTPUT_PRIM_ALL_CHNLS:
+		axi_ctrl->share_ctrl->outpath.output_mode =
+			VFE40_OUTPUT_MODE_PRIMARY_ALL_CHNLS;
+		break;
+	case OUTPUT_PRIM|OUTPUT_SEC:
+		axi_ctrl->share_ctrl->outpath.output_mode =
+			VFE40_OUTPUT_MODE_PRIMARY;
+		axi_ctrl->share_ctrl->outpath.output_mode |=
+			VFE40_OUTPUT_MODE_SECONDARY;
+		break;
+	case OUTPUT_PRIM|OUTPUT_SEC_ALL_CHNLS:
+		axi_ctrl->share_ctrl->outpath.output_mode =
+			VFE40_OUTPUT_MODE_PRIMARY;
+		axi_ctrl->share_ctrl->outpath.output_mode |=
+			VFE40_OUTPUT_MODE_SECONDARY_ALL_CHNLS;
+		break;
+	case OUTPUT_PRIM_ALL_CHNLS|OUTPUT_SEC:
+		axi_ctrl->share_ctrl->outpath.output_mode =
+			VFE40_OUTPUT_MODE_PRIMARY_ALL_CHNLS;
+		axi_ctrl->share_ctrl->outpath.output_mode |=
+			VFE40_OUTPUT_MODE_SECONDARY;
+		break;
+	default:
+		pr_err("%s Invalid AXI mode %d ", __func__, mode);
+		return -EINVAL;
+	}
+	msm_camera_io_w(*ao, axi_ctrl->share_ctrl->vfebase +
+		VFE_BUS_IO_FORMAT_CFG);
+	msm_camera_io_memcpy(axi_ctrl->share_ctrl->vfebase +
+		vfe40_cmd[VFE_CMD_AXI_OUT_CFG].offset, axi_cfg,
+		vfe40_cmd[VFE_CMD_AXI_OUT_CFG].length - V40_AXI_CH_INF_LEN);
+	return 0;
+}
+
+static int msm_axi_config(struct v4l2_subdev *sd, void __user *arg)
+{
+	struct msm_vfe_cfg_cmd cfgcmd;
+	struct msm_isp_cmd vfecmd;
+	int rc = 0;
+	struct axi_ctrl_t *axi_ctrl = v4l2_get_subdevdata(sd);
+
+	if (!axi_ctrl->share_ctrl->vfebase) {
+		pr_err("%s: base address unmapped\n", __func__);
+		return -EFAULT;
+	}
+	if (NULL != arg) {
+		if (copy_from_user(&cfgcmd, arg, sizeof(cfgcmd))) {
+			ERR_COPY_FROM_USER();
+			return -EFAULT;
+		}
+	}
+	if (NULL != cfgcmd.value) {
+		if (copy_from_user(&vfecmd,
+				(void __user *)(cfgcmd.value),
+				sizeof(vfecmd))) {
+			pr_err("%s %d: copy_from_user failed\n", __func__,
+				__LINE__);
+			return -EFAULT;
+		}
+	}
+
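+	/* Each CMD_AXI_CFG_* case below copies the userspace AXI output
+	table and programs the write masters for that output combination
+	via vfe40_config_axi(). */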
+	switch (cfgcmd.cmd_type) {
+	case CMD_AXI_CFG_PRIM: {
+		uint32_t *axio = NULL;
+		axio = kmalloc(vfe40_cmd[VFE_CMD_AXI_OUT_CFG].length,
+				GFP_ATOMIC);
+		if (!axio) {
+			rc = -ENOMEM;
+			break;
+		}
+
+		if (copy_from_user(axio, (void __user *)(vfecmd.value),
+				vfe40_cmd[VFE_CMD_AXI_OUT_CFG].length)) {
+			kfree(axio);
+			rc = -EFAULT;
+			break;
+		}
+		vfe40_config_axi(axi_ctrl, OUTPUT_PRIM, axio);
+		kfree(axio);
+	}
+		break;
+	case CMD_AXI_CFG_PRIM_ALL_CHNLS: {
+		uint32_t *axio = NULL;
+		axio = kmalloc(vfe40_cmd[VFE_CMD_AXI_OUT_CFG].length,
+				GFP_ATOMIC);
+		if (!axio) {
+			rc = -ENOMEM;
+			break;
+		}
+
+		if (copy_from_user(axio, (void __user *)(vfecmd.value),
+				vfe40_cmd[VFE_CMD_AXI_OUT_CFG].length)) {
+			kfree(axio);
+			rc = -EFAULT;
+			break;
+		}
+		vfe40_config_axi(axi_ctrl, OUTPUT_PRIM_ALL_CHNLS, axio);
+		kfree(axio);
+	}
+		break;
+	case CMD_AXI_CFG_PRIM|CMD_AXI_CFG_SEC: {
+		uint32_t *axio = NULL;
+		axio = kmalloc(vfe40_cmd[VFE_CMD_AXI_OUT_CFG].length,
+				GFP_ATOMIC);
+		if (!axio) {
+			rc = -ENOMEM;
+			break;
+		}
+
+		if (copy_from_user(axio, (void __user *)(vfecmd.value),
+				vfe40_cmd[VFE_CMD_AXI_OUT_CFG].length)) {
+			kfree(axio);
+			rc = -EFAULT;
+			break;
+		}
+		vfe40_config_axi(axi_ctrl, OUTPUT_PRIM|OUTPUT_SEC, axio);
+		kfree(axio);
+	}
+		break;
+	case CMD_AXI_CFG_PRIM|CMD_AXI_CFG_SEC_ALL_CHNLS: {
+		uint32_t *axio = NULL;
+		axio = kmalloc(vfe40_cmd[VFE_CMD_AXI_OUT_CFG].length,
+				GFP_ATOMIC);
+		if (!axio) {
+			rc = -ENOMEM;
+			break;
+		}
+
+		if (copy_from_user(axio, (void __user *)(vfecmd.value),
+				vfe40_cmd[VFE_CMD_AXI_OUT_CFG].length)) {
+			kfree(axio);
+			rc = -EFAULT;
+			break;
+		}
+		vfe40_config_axi(axi_ctrl,
+			OUTPUT_PRIM|OUTPUT_SEC_ALL_CHNLS, axio);
+		kfree(axio);
+	}
+		break;
+	case CMD_AXI_CFG_PRIM_ALL_CHNLS|CMD_AXI_CFG_SEC: {
+		uint32_t *axio = NULL;
+		axio = kmalloc(vfe40_cmd[VFE_CMD_AXI_OUT_CFG].length,
+				GFP_ATOMIC);
+		if (!axio) {
+			rc = -ENOMEM;
+			break;
+		}
+
+		if (copy_from_user(axio, (void __user *)(vfecmd.value),
+				vfe40_cmd[VFE_CMD_AXI_OUT_CFG].length)) {
+			kfree(axio);
+			rc = -EFAULT;
+			break;
+		}
+		vfe40_config_axi(axi_ctrl,
+			OUTPUT_PRIM_ALL_CHNLS|OUTPUT_SEC, axio);
+		kfree(axio);
+	}
+		break;
+	case CMD_AXI_CFG_PRIM_ALL_CHNLS|CMD_AXI_CFG_SEC_ALL_CHNLS:
+		pr_err("%s Invalid/Unsupported AXI configuration %x",
+			__func__, cfgcmd.cmd_type);
+		break;
+	case CMD_AXI_START:
+		axi_start(axi_ctrl);
+		break;
+	case CMD_AXI_STOP:
+		axi_stop(axi_ctrl);
+		break;
+	default:
+		pr_err("%s Unsupported AXI configuration %x ", __func__,
+			cfgcmd.cmd_type);
+		break;
+	}
+	return rc;
+}
+
+static struct msm_free_buf *vfe40_check_free_buffer(
+	int id, int path, struct axi_ctrl_t *axi_ctrl)
+{
+	struct vfe40_output_ch *outch = NULL;
+	struct msm_free_buf *b = NULL;
+	uint32_t image_mode = 0;
+
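+	/* Notify the upper layer so it can supply a free buffer for this
+	path, then return it only if a valid physical address was filled
+	in. */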
+	if (path == VFE_MSG_OUTPUT_PRIMARY)
+		image_mode = axi_ctrl->share_ctrl->outpath.out0.image_mode;
+	else
+		image_mode = axi_ctrl->share_ctrl->outpath.out1.image_mode;
+
+	vfe40_subdev_notify(id, path, image_mode,
+		&axi_ctrl->subdev, axi_ctrl->share_ctrl);
+	outch = vfe40_get_ch(path, axi_ctrl->share_ctrl);
+	if (outch->free_buf.ch_paddr[0])
+		b = &outch->free_buf;
+	return b;
+}
+
+static void vfe_send_outmsg(
+	struct axi_ctrl_t *axi_ctrl, uint8_t msgid,
+	uint32_t ch0_paddr, uint32_t ch1_paddr,
+	uint32_t ch2_paddr, uint32_t image_mode)
+{
+	struct isp_msg_output msg;
+
+	msg.output_id = msgid;
+	msg.buf.image_mode = image_mode;
+	msg.buf.ch_paddr[0]	= ch0_paddr;
+	msg.buf.ch_paddr[1]	= ch1_paddr;
+	msg.buf.ch_paddr[2]	= ch2_paddr;
+	msg.frameCounter = axi_ctrl->share_ctrl->vfeFrameId;
+
+	v4l2_subdev_notify(&axi_ctrl->subdev,
+			NOTIFY_VFE_MSG_OUT,
+			&msg);
+	return;
+}
+
+static void vfe40_process_output_path_irq_0(
+	struct axi_ctrl_t *axi_ctrl)
+{
+	uint32_t ping_pong;
+	uint32_t ch0_paddr, ch1_paddr, ch2_paddr;
+	uint8_t out_bool = 0;
+	struct msm_free_buf *free_buf = NULL;
+
+	free_buf = vfe40_check_free_buffer(VFE_MSG_OUTPUT_IRQ,
+		VFE_MSG_OUTPUT_PRIMARY, axi_ctrl);
+
+	/* We render frames in the following cases:
+	1. Continuous mode and a free buffer is available.
+	2. Snapshot mode, where a free buffer is not always available;
+	when the pending snapshot count is <= 1 there is no need to
+	use a free buffer.
+	*/
+	out_bool = (
+		(axi_ctrl->share_ctrl->operation_mode ==
+			VFE_OUTPUTS_THUMB_AND_MAIN ||
+		axi_ctrl->share_ctrl->operation_mode ==
+			VFE_OUTPUTS_MAIN_AND_THUMB ||
+		axi_ctrl->share_ctrl->operation_mode ==
+			VFE_OUTPUTS_THUMB_AND_JPEG ||
+		axi_ctrl->share_ctrl->operation_mode ==
+			VFE_OUTPUTS_JPEG_AND_THUMB ||
+		axi_ctrl->share_ctrl->operation_mode ==
+			VFE_OUTPUTS_RAW ||
+		axi_ctrl->share_ctrl->liveshot_state ==
+			VFE_STATE_STARTED ||
+		axi_ctrl->share_ctrl->liveshot_state ==
+			VFE_STATE_STOP_REQUESTED ||
+		axi_ctrl->share_ctrl->liveshot_state ==
+			VFE_STATE_STOPPED) &&
+		(axi_ctrl->share_ctrl->vfe_capture_count <= 1)) ||
+			free_buf;
+
+	if (out_bool) {
+		ping_pong = msm_camera_io_r(axi_ctrl->share_ctrl->vfebase +
+			VFE_BUS_PING_PONG_STATUS);
+
+		/* Channel 0*/
+		ch0_paddr = vfe40_get_ch_addr(
+			ping_pong, axi_ctrl->share_ctrl->vfebase,
+			axi_ctrl->share_ctrl->outpath.out0.ch0);
+		/* Channel 1*/
+		ch1_paddr = vfe40_get_ch_addr(
+			ping_pong, axi_ctrl->share_ctrl->vfebase,
+			axi_ctrl->share_ctrl->outpath.out0.ch1);
+		/* Channel 2*/
+		ch2_paddr = vfe40_get_ch_addr(
+			ping_pong, axi_ctrl->share_ctrl->vfebase,
+			axi_ctrl->share_ctrl->outpath.out0.ch2);
+
+		CDBG("output path 0, ch0 = 0x%x, ch1 = 0x%x, ch2 = 0x%x\n",
+			ch0_paddr, ch1_paddr, ch2_paddr);
+		if (free_buf) {
+			/* Y channel */
+			vfe40_put_ch_addr(ping_pong,
+			axi_ctrl->share_ctrl->vfebase,
+			axi_ctrl->share_ctrl->outpath.out0.ch0,
+			free_buf->ch_paddr[0]);
+			/* Chroma channel */
+			vfe40_put_ch_addr(ping_pong,
+			axi_ctrl->share_ctrl->vfebase,
+			axi_ctrl->share_ctrl->outpath.out0.ch1,
+			free_buf->ch_paddr[1]);
+			if (free_buf->num_planes > 2)
+				vfe40_put_ch_addr(ping_pong,
+					axi_ctrl->share_ctrl->vfebase,
+					axi_ctrl->share_ctrl->outpath.out0.ch2,
+					free_buf->ch_paddr[2]);
+		}
+		if (axi_ctrl->share_ctrl->operation_mode ==
+				VFE_OUTPUTS_THUMB_AND_MAIN ||
+			axi_ctrl->share_ctrl->operation_mode ==
+				VFE_OUTPUTS_MAIN_AND_THUMB ||
+			axi_ctrl->share_ctrl->operation_mode ==
+				VFE_OUTPUTS_THUMB_AND_JPEG ||
+			axi_ctrl->share_ctrl->operation_mode ==
+				VFE_OUTPUTS_JPEG_AND_THUMB ||
+			axi_ctrl->share_ctrl->operation_mode ==
+				VFE_OUTPUTS_RAW ||
+			axi_ctrl->share_ctrl->liveshot_state ==
+				VFE_STATE_STOPPED)
+			axi_ctrl->share_ctrl->outpath.out0.capture_cnt--;
+
+		vfe_send_outmsg(axi_ctrl,
+			MSG_ID_OUTPUT_PRIMARY, ch0_paddr,
+			ch1_paddr, ch2_paddr,
+			axi_ctrl->share_ctrl->outpath.out0.image_mode);
+
+		if (axi_ctrl->share_ctrl->liveshot_state == VFE_STATE_STOPPED)
+			axi_ctrl->share_ctrl->liveshot_state = VFE_STATE_IDLE;
+
+	} else {
+		axi_ctrl->share_ctrl->outpath.out0.frame_drop_cnt++;
+		CDBG("path_irq_0 - no free buffer!\n");
+	}
+}
+
+static void vfe40_process_output_path_irq_1(
+	struct axi_ctrl_t *axi_ctrl)
+{
+	uint32_t ping_pong;
+	uint32_t ch0_paddr, ch1_paddr, ch2_paddr;
+	/* this must be snapshot main image output. */
+	uint8_t out_bool = 0;
+	struct msm_free_buf *free_buf = NULL;
+
+	free_buf = vfe40_check_free_buffer(VFE_MSG_OUTPUT_IRQ,
+		VFE_MSG_OUTPUT_SECONDARY, axi_ctrl);
+	out_bool = ((axi_ctrl->share_ctrl->operation_mode ==
+				VFE_OUTPUTS_THUMB_AND_MAIN ||
+			axi_ctrl->share_ctrl->operation_mode ==
+				VFE_OUTPUTS_MAIN_AND_THUMB ||
+			axi_ctrl->share_ctrl->operation_mode ==
+				VFE_OUTPUTS_RAW ||
+			axi_ctrl->share_ctrl->operation_mode ==
+				VFE_OUTPUTS_JPEG_AND_THUMB) &&
+			(axi_ctrl->share_ctrl->vfe_capture_count <= 1)) ||
+				free_buf;
+
+	if (out_bool) {
+		ping_pong = msm_camera_io_r(axi_ctrl->share_ctrl->vfebase +
+			VFE_BUS_PING_PONG_STATUS);
+
+		/* Y channel */
+		ch0_paddr = vfe40_get_ch_addr(ping_pong,
+			axi_ctrl->share_ctrl->vfebase,
+			axi_ctrl->share_ctrl->outpath.out1.ch0);
+		/* Chroma channel */
+		ch1_paddr = vfe40_get_ch_addr(ping_pong,
+			axi_ctrl->share_ctrl->vfebase,
+			axi_ctrl->share_ctrl->outpath.out1.ch1);
+		ch2_paddr = vfe40_get_ch_addr(ping_pong,
+			axi_ctrl->share_ctrl->vfebase,
+			axi_ctrl->share_ctrl->outpath.out1.ch2);
+
+		CDBG("%s ch0 = 0x%x, ch1 = 0x%x, ch2 = 0x%x\n",
+			__func__, ch0_paddr, ch1_paddr, ch2_paddr);
+		if (free_buf) {
+			/* Y channel */
+			vfe40_put_ch_addr(ping_pong,
+			axi_ctrl->share_ctrl->vfebase,
+			axi_ctrl->share_ctrl->outpath.out1.ch0,
+			free_buf->ch_paddr[0]);
+			/* Chroma channel */
+			vfe40_put_ch_addr(ping_pong,
+			axi_ctrl->share_ctrl->vfebase,
+			axi_ctrl->share_ctrl->outpath.out1.ch1,
+			free_buf->ch_paddr[1]);
+			if (free_buf->num_planes > 2)
+				vfe40_put_ch_addr(ping_pong,
+					axi_ctrl->share_ctrl->vfebase,
+					axi_ctrl->share_ctrl->outpath.out1.ch2,
+					free_buf->ch_paddr[2]);
+		}
+		if (axi_ctrl->share_ctrl->operation_mode ==
+				VFE_OUTPUTS_THUMB_AND_MAIN ||
+			axi_ctrl->share_ctrl->operation_mode ==
+				VFE_OUTPUTS_MAIN_AND_THUMB ||
+			axi_ctrl->share_ctrl->operation_mode ==
+				VFE_OUTPUTS_RAW ||
+			axi_ctrl->share_ctrl->operation_mode ==
+				VFE_OUTPUTS_JPEG_AND_THUMB)
+			axi_ctrl->share_ctrl->outpath.out1.capture_cnt--;
+
+		vfe_send_outmsg(axi_ctrl,
+			MSG_ID_OUTPUT_SECONDARY, ch0_paddr,
+			ch1_paddr, ch2_paddr,
+			axi_ctrl->share_ctrl->outpath.out1.image_mode);
+
+	} else {
+		axi_ctrl->share_ctrl->outpath.out1.frame_drop_cnt++;
+		CDBG("path_irq_1 - no free buffer!\n");
+	}
+}
+
+static void msm_axi_process_irq(struct v4l2_subdev *sd, void *arg)
+{
+	struct axi_ctrl_t *axi_ctrl = v4l2_get_subdevdata(sd);
+	uint32_t irqstatus = (uint32_t) arg;
+
+	if (!axi_ctrl->share_ctrl->vfebase) {
+		pr_err("%s: base address unmapped\n", __func__);
+		return;
+	}
+	/* next, check output path related interrupts. */
+	if (irqstatus &
+		VFE_IRQ_STATUS0_IMAGE_COMPOSIT_DONE0_MASK) {
+		CDBG("Image composite done 0 irq occured.\n");
+		vfe40_process_output_path_irq_0(axi_ctrl);
+	}
+	if (irqstatus &
+		VFE_IRQ_STATUS0_IMAGE_COMPOSIT_DONE1_MASK) {
+		CDBG("Image composite done 1 irq occured.\n");
+		vfe40_process_output_path_irq_1(axi_ctrl);
+	}
+	/* In snapshot mode, once both capture counts reach zero,
+	stop CAMIF and send the snapshot done message. */
+	if (axi_ctrl->share_ctrl->operation_mode ==
+			VFE_OUTPUTS_THUMB_AND_MAIN ||
+		axi_ctrl->share_ctrl->operation_mode ==
+			VFE_OUTPUTS_MAIN_AND_THUMB ||
+		axi_ctrl->share_ctrl->operation_mode ==
+			VFE_OUTPUTS_THUMB_AND_JPEG ||
+		axi_ctrl->share_ctrl->operation_mode ==
+			VFE_OUTPUTS_JPEG_AND_THUMB ||
+		axi_ctrl->share_ctrl->operation_mode ==
+			VFE_OUTPUTS_RAW) {
+		if ((axi_ctrl->share_ctrl->outpath.out0.capture_cnt == 0)
+				&& (axi_ctrl->share_ctrl->outpath.out1.
+				capture_cnt == 0)) {
+			msm_camera_io_w_mb(
+				CAMIF_COMMAND_STOP_IMMEDIATELY,
+				axi_ctrl->share_ctrl->vfebase +
+				VFE_CAMIF_COMMAND);
+			vfe40_send_isp_msg(&axi_ctrl->subdev,
+				axi_ctrl->share_ctrl->vfeFrameId,
+				MSG_ID_SNAPSHOT_DONE);
+		}
+	}
+}
+
+static int msm_axi_buf_cfg(struct v4l2_subdev *sd, void __user *arg)
+{
+	struct msm_camvfe_params *vfe_params =
+		(struct msm_camvfe_params *)arg;
+	struct msm_vfe_cfg_cmd *cmd = vfe_params->vfe_cfg;
+	struct axi_ctrl_t *axi_ctrl = v4l2_get_subdevdata(sd);
+	void *data = vfe_params->data;
+	int rc = 0;
+
+	if (!axi_ctrl->share_ctrl->vfebase) {
+		pr_err("%s: base address unmapped\n", __func__);
+		return -EFAULT;
+	}
+
+	switch (cmd->cmd_type) {
+	case CMD_CONFIG_PING_ADDR: {
+		int path = *((int *)cmd->value);
+		struct vfe40_output_ch *outch =
+			vfe40_get_ch(path, axi_ctrl->share_ctrl);
+		outch->ping = *((struct msm_free_buf *)data);
+	}
+		break;
+
+	case CMD_CONFIG_PONG_ADDR: {
+		int path = *((int *)cmd->value);
+		struct vfe40_output_ch *outch =
+			vfe40_get_ch(path, axi_ctrl->share_ctrl);
+		outch->pong = *((struct msm_free_buf *)data);
+	}
+		break;
+
+	case CMD_CONFIG_FREE_BUF_ADDR: {
+		int path = *((int *)cmd->value);
+		struct vfe40_output_ch *outch =
+			vfe40_get_ch(path, axi_ctrl->share_ctrl);
+		outch->free_buf = *((struct msm_free_buf *)data);
+	}
+		break;
+	default:
+		pr_err("%s Unsupported AXI Buf config %x ", __func__,
+			cmd->cmd_type);
+	}
+	return rc;
+}
+
+static struct msm_cam_clk_info vfe40_clk_info[] = {
+	{"vfe_clk_src", 266670000},
+	{"camss_vfe_vfe_clk", -1},
+	{"camss_csi_vfe_clk", -1},
+	{"top_clk", -1},
+	{"iface_clk", -1},
+	{"bus_clk", -1},
+};
+
+int msm_axi_subdev_init(struct v4l2_subdev *sd,
+			struct msm_cam_media_controller *mctl)
+{
+	int rc = 0;
+	struct axi_ctrl_t *axi_ctrl = v4l2_get_subdevdata(sd);
+	v4l2_set_subdev_hostdata(sd, mctl);
+	spin_lock_init(&axi_ctrl->tasklet_lock);
+	INIT_LIST_HEAD(&axi_ctrl->tasklet_q);
+	spin_lock_init(&axi_ctrl->share_ctrl->sd_notify_lock);
+
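+	/* Bring-up order: map the VFE register space, enable the FS_VFE
+	regulator, enable the VFE clocks, vote for bus bandwidth and
+	finally enable the VFE interrupt. */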
+	axi_ctrl->share_ctrl->vfebase = ioremap(axi_ctrl->vfemem->start,
+		resource_size(axi_ctrl->vfemem));
+	if (!axi_ctrl->share_ctrl->vfebase) {
+		rc = -ENOMEM;
+		pr_err("%s: vfe ioremap failed\n", __func__);
+		goto remap_failed;
+	}
+
+	if (axi_ctrl->fs_vfe == NULL) {
+		axi_ctrl->fs_vfe =
+			regulator_get(&axi_ctrl->pdev->dev, "vdd");
+		if (IS_ERR(axi_ctrl->fs_vfe)) {
+			pr_err("%s: Regulator FS_VFE get failed %ld\n",
+				__func__, PTR_ERR(axi_ctrl->fs_vfe));
+			rc = PTR_ERR(axi_ctrl->fs_vfe);
+			axi_ctrl->fs_vfe = NULL;
+			goto fs_failed;
+		} else if (regulator_enable(axi_ctrl->fs_vfe)) {
+			pr_err("%s: Regulator FS_VFE enable failed\n",
+							__func__);
+			rc = -ENODEV;
+			regulator_put(axi_ctrl->fs_vfe);
+			axi_ctrl->fs_vfe = NULL;
+			goto fs_failed;
+		}
+	}
+	rc = msm_cam_clk_enable(&axi_ctrl->pdev->dev, vfe40_clk_info,
+			axi_ctrl->vfe_clk, ARRAY_SIZE(vfe40_clk_info), 1);
+	if (rc < 0)
+			goto clk_enable_failed;
+
+	msm_camio_bus_scale_cfg(
+		mctl->sdata->pdata->cam_bus_scale_table, S_INIT);
+	msm_camio_bus_scale_cfg(
+		mctl->sdata->pdata->cam_bus_scale_table, S_PREVIEW);
+
+	axi_ctrl->share_ctrl->register_total = VFE40_REGISTER_TOTAL;
+
+	enable_irq(axi_ctrl->vfeirq->start);
+
+	return rc;
+clk_enable_failed:
+	regulator_disable(axi_ctrl->fs_vfe);
+	regulator_put(axi_ctrl->fs_vfe);
+	axi_ctrl->fs_vfe = NULL;
+fs_failed:
+	iounmap(axi_ctrl->share_ctrl->vfebase);
+	axi_ctrl->share_ctrl->vfebase = NULL;
+remap_failed:
+	disable_irq(axi_ctrl->vfeirq->start);
+	return rc;
+}
+
+void msm_axi_subdev_release(struct v4l2_subdev *sd)
+{
+	struct msm_cam_media_controller *pmctl =
+		(struct msm_cam_media_controller *)v4l2_get_subdev_hostdata(sd);
+	struct axi_ctrl_t *axi_ctrl = v4l2_get_subdevdata(sd);
+	if (!axi_ctrl->share_ctrl->vfebase) {
+		pr_err("%s: base address unmapped\n", __func__);
+		return;
+	}
+
+	CDBG("%s, free_irq\n", __func__);
+	disable_irq(axi_ctrl->vfeirq->start);
+	tasklet_kill(&axi_ctrl->vfe40_tasklet);
+	msm_cam_clk_enable(&axi_ctrl->pdev->dev, vfe40_clk_info,
+		axi_ctrl->vfe_clk, ARRAY_SIZE(vfe40_clk_info), 0);
+
+	if (axi_ctrl->fs_vfe) {
+		regulator_disable(axi_ctrl->fs_vfe);
+		regulator_put(axi_ctrl->fs_vfe);
+		axi_ctrl->fs_vfe = NULL;
+	}
+	iounmap(axi_ctrl->share_ctrl->vfebase);
+	axi_ctrl->share_ctrl->vfebase = NULL;
+
+	if (atomic_read(&axi_ctrl->share_ctrl->irq_cnt))
+		pr_warning("%s, Warning IRQ Count not ZERO\n", __func__);
+
+	msm_camio_bus_scale_cfg(
+		pmctl->sdata->pdata->cam_bus_scale_table, S_EXIT);
+}
+
+static long msm_axi_subdev_ioctl(struct v4l2_subdev *sd,
+			unsigned int cmd, void *arg)
+{
+	int rc = -ENOIOCTLCMD;
+	switch (cmd) {
+	case VIDIOC_MSM_AXI_INIT:
+		rc = msm_axi_subdev_init(sd,
+			(struct msm_cam_media_controller *)arg);
+		break;
+	case VIDIOC_MSM_AXI_CFG:
+		rc = msm_axi_config(sd, arg);
+		break;
+	case VIDIOC_MSM_AXI_IRQ:
+		msm_axi_process_irq(sd, arg);
+		rc = 0;
+		break;
+	case VIDIOC_MSM_AXI_BUF_CFG:
+		msm_axi_buf_cfg(sd, arg);
+		rc = 0;
+		break;
+	case VIDIOC_MSM_AXI_RELEASE:
+		msm_axi_subdev_release(sd);
+		rc = 0;
+		break;
+	default:
+		pr_err("%s: command not found\n", __func__);
+	}
+	return rc;
+}
+
+static const struct v4l2_subdev_core_ops msm_axi_subdev_core_ops = {
+	.ioctl = msm_axi_subdev_ioctl,
+};
+
+static const struct v4l2_subdev_video_ops msm_axi_subdev_video_ops = {
+	.s_crystal_freq = msm_axi_subdev_s_crystal_freq,
+};
+
+static const struct v4l2_subdev_ops msm_axi_subdev_ops = {
+	.core = &msm_axi_subdev_core_ops,
+	.video = &msm_axi_subdev_video_ops,
+};
+
+static const struct v4l2_subdev_internal_ops msm_axi_internal_ops;
+
+void vfe40_axi_probe(struct axi_ctrl_t *axi_ctrl)
+{
+	struct msm_cam_subdev_info sd_info;
+	v4l2_subdev_init(&axi_ctrl->subdev, &msm_axi_subdev_ops);
+	axi_ctrl->subdev.internal_ops = &msm_axi_internal_ops;
+	axi_ctrl->subdev.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
+	snprintf(axi_ctrl->subdev.name,
+			 sizeof(axi_ctrl->subdev.name), "axi");
+	v4l2_set_subdevdata(&axi_ctrl->subdev, axi_ctrl);
+
+	sd_info.sdev_type = AXI_DEV;
+	sd_info.sd_index = axi_ctrl->pdev->id;
+	sd_info.irq_num = 0;
+	msm_cam_register_subdev_node(&axi_ctrl->subdev, &sd_info);
+}
diff --git a/drivers/media/video/msm/vfe/msm_vfe7x27a_v4l2.c b/drivers/media/video/msm/vfe/msm_vfe7x27a_v4l2.c
index b6daa5f..e1d8b48 100644
--- a/drivers/media/video/msm/vfe/msm_vfe7x27a_v4l2.c
+++ b/drivers/media/video/msm/vfe/msm_vfe7x27a_v4l2.c
@@ -333,6 +333,7 @@
 		"VFE_SCALE_OUTPUT2_CONFIG"},
 	{VFE_CMD_CAPTURE_RAW, VFE_START, QDSP_CMDQUEUE,
 			"VFE_CMD_CAPTURE_RAW", "VFE_START"},
+	{VFE_CMD_STOP_LIVESHOT, VFE_MAX, VFE_MAX},
 	{VFE_CMD_RECONFIG_VFE, VFE_MAX, VFE_MAX},
 };
 
@@ -666,6 +667,8 @@
 	void *data = NULL;
 	struct buf_info *outch = NULL;
 	uint32_t y_phy, cbcr_phy;
+	static uint32_t liveshot_y_phy;
+	static struct vfe_endframe liveshot_swap;
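+	/* liveshot_y_phy records the Y address handed to the adsp for the
+	 * live snapshot; liveshot_swap saves the frame returned while that
+	 * buffer was queued so it can be re-queued later */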
 	struct table_cmd *table_pending = NULL;
 	unsigned long flags;
 	void   *cmd_data = NULL;
@@ -833,48 +836,139 @@
 			if (op_mode & SNAPSHOT_MASK_MODE) {
 				kfree(data);
 				return;
-			} else {
-				free_buf = vfe2x_check_free_buffer(
+			}
+			free_buf = vfe2x_check_free_buffer(
 					VFE_MSG_OUTPUT_IRQ,
 					VFE_MSG_OUTPUT_PRIMARY);
-			      CDBG("free_buf = %x\n", (unsigned int) free_buf);
-			      if (free_buf) {
+			CDBG("free_buf = %x\n",
+					(unsigned int) free_buf);
+			spin_lock_irqsave(
+					&vfe2x_ctrl->liveshot_enabled_lock,
+					flags);
+			if (!vfe2x_ctrl->liveshot_enabled) {
+				spin_unlock_irqrestore(
+						&vfe2x_ctrl->
+						liveshot_enabled_lock,
+						flags);
+				if (free_buf) {
 					fack.header = VFE_OUTPUT2_ACK;
 
 					fack.output2newybufferaddress =
-						(void *)(free_buf->ch_paddr[0]);
+						(void *)
+						(free_buf->ch_paddr[0]);
 
 					fack.output2newcbcrbufferaddress =
-						(void *)(free_buf->ch_paddr[1]);
+						(void *)
+						(free_buf->ch_paddr[1]);
 
 					cmd_data = &fack;
 					len = sizeof(fack);
-					msm_adsp_write(vfe_mod, QDSP_CMDQUEUE,
+					msm_adsp_write(vfe_mod,
+							QDSP_CMDQUEUE,
 							cmd_data, len);
-			      } else {
+				} else {
 					fack.header = VFE_OUTPUT2_ACK;
 					fack.output2newybufferaddress =
-					(void *)
-				((struct vfe_endframe *)data)->y_address;
+						(void *)
+						((struct vfe_endframe *)
+						 data)->y_address;
 					fack.output2newcbcrbufferaddress =
-					(void *)
-				((struct vfe_endframe *)data)->cbcr_address;
+						(void *)
+						((struct vfe_endframe *)
+						 data)->cbcr_address;
 					cmd_data = &fack;
 					len = sizeof(fack);
-					msm_adsp_write(vfe_mod, QDSP_CMDQUEUE,
-						cmd_data, len);
+					msm_adsp_write(vfe_mod,
+							QDSP_CMDQUEUE,
+							cmd_data, len);
 					if (!vfe2x_ctrl->zsl_mode) {
 						kfree(data);
 						return;
 					}
 				}
+			} else { /* Live snapshot */
+				spin_unlock_irqrestore(
+						&vfe2x_ctrl->
+						liveshot_enabled_lock,
+						flags);
+				if (free_buf) {
+					/* save in liveshot_swap to requeue
+					   once the liveshot snapshot buffer
+					   is obtained from the adsp */
+					liveshot_swap.y_address =
+						((struct vfe_endframe *)
+						 data)->y_address;
+					liveshot_swap.cbcr_address =
+						((struct vfe_endframe *)
+						 data)->cbcr_address;
+
+					fack.header = VFE_OUTPUT2_ACK;
+
+					fack.output2newybufferaddress =
+						(void *)
+						(free_buf->ch_paddr[0]);
+
+					fack.output2newcbcrbufferaddress =
+						(void *)
+						(free_buf->ch_paddr[1]);
+
+					liveshot_y_phy =
+						(uint32_t)
+						fack.output2newybufferaddress;
+
+					cmd_data = &fack;
+					len = sizeof(fack);
+					msm_adsp_write(vfe_mod,
+							QDSP_CMDQUEUE,
+							cmd_data, len);
+				} else if (liveshot_y_phy !=
+						((struct vfe_endframe *)
+						 data)->y_address) {
+
+					fack.header = VFE_OUTPUT2_ACK;
+					fack.output2newybufferaddress =
+						(void *)
+						((struct vfe_endframe *)
+						 data)->y_address;
+
+					fack.output2newcbcrbufferaddress =
+						(void *)
+						((struct vfe_endframe *)
+						 data)->cbcr_address;
+
+					cmd_data = &fack;
+					len = sizeof(fack);
+					msm_adsp_write(vfe_mod,
+							QDSP_CMDQUEUE,
+							cmd_data, len);
+					kfree(data);
+					return;
+				} else {
+					/* Re-queue the frame returned
+					 * while free buf was handed out */
+					fack.header = VFE_OUTPUT2_ACK;
+					fack.output2newybufferaddress =
+						(void *)
+						(liveshot_swap.y_address);
+
+					fack.output2newcbcrbufferaddress =
+						(void *)
+						(liveshot_swap.cbcr_address);
+					cmd_data = &fack;
+					len = sizeof(fack);
+					msm_adsp_write(vfe_mod,
+							QDSP_CMDQUEUE,
+							cmd_data, len);
+				}
 			}
-			y_phy = ((struct vfe_endframe *)data)->y_address;
-			cbcr_phy = ((struct vfe_endframe *)data)->cbcr_address;
+			y_phy = ((struct vfe_endframe *)data)->
+				y_address;
+			cbcr_phy = ((struct vfe_endframe *)data)->
+				cbcr_address;
 
 
-			CDBG("vfe_7x_convert, y_phy = 0x%x, cbcr_phy = 0x%x\n",
-				 y_phy, cbcr_phy);
+			CDBG("MSG_OUT2:y_phy= 0x%x, cbcr_phy= 0x%x\n",
+					y_phy, cbcr_phy);
 			if (free_buf) {
 				for (i = 0; i < 3; i++) {
 					if (vfe2x_ctrl->free_buf.buf[i].
@@ -892,14 +986,23 @@
 					CDBG("Address doesnt match\n");
 			}
 			memcpy(((struct vfe_frame_extra *)extdata),
-				&((struct vfe_endframe *)data)->extra,
-				sizeof(struct vfe_frame_extra));
+					&((struct vfe_endframe *)data)->extra,
+					sizeof(struct vfe_frame_extra));
 
 			vfe2x_ctrl->vfeFrameId =
-				((struct vfe_frame_extra *)extdata)->frame_id;
-			vfe_send_outmsg(&vfe2x_ctrl->subdev,
+				((struct vfe_frame_extra *)extdata)->
+				frame_id;
+
+			if (!vfe2x_ctrl->liveshot_enabled) {
+				/* Liveshot not enabled */
+				vfe_send_outmsg(&vfe2x_ctrl->subdev,
 						MSG_ID_OUTPUT_PRIMARY,
 						y_phy, cbcr_phy);
+			} else if (liveshot_y_phy == y_phy) {
+				vfe_send_outmsg(&vfe2x_ctrl->subdev,
+						MSG_ID_OUTPUT_PRIMARY,
+						y_phy, cbcr_phy);
+			}
 			break;
 		case MSG_RESET_ACK:
 		case MSG_START_ACK:
@@ -1056,6 +1159,7 @@
 	int    cnt;
 	int rc = 0;
 	int o_mode = 0;
+	unsigned long flags;
 
 	if (op_mode & SNAPSHOT_MASK_MODE)
 		o_mode = SNAPSHOT_MASK_MODE;
@@ -1164,8 +1268,17 @@
 		ao->output2buffer1_cbcr_phy = ad->ping.ch_paddr[1];
 		ao->output2buffer2_y_phy = ad->pong.ch_paddr[0];
 		ao->output2buffer2_cbcr_phy = ad->pong.ch_paddr[1];
-		ao->output2buffer3_y_phy = ad->free_buf.ch_paddr[0];
-		ao->output2buffer3_cbcr_phy = ad->free_buf.ch_paddr[1];
+		spin_lock_irqsave(&vfe2x_ctrl->liveshot_enabled_lock,
+				flags);
+		if (vfe2x_ctrl->liveshot_enabled) { /* Live shot */
+			ao->output2buffer3_y_phy = ad->pong.ch_paddr[0];
+			ao->output2buffer3_cbcr_phy = ad->pong.ch_paddr[1];
+		} else {
+			ao->output2buffer3_y_phy = ad->free_buf.ch_paddr[0];
+			ao->output2buffer3_cbcr_phy = ad->free_buf.ch_paddr[1];
+		}
+		spin_unlock_irqrestore(&vfe2x_ctrl->liveshot_enabled_lock,
+				flags);
 		bptr = &ao->output2buffer4_y_phy;
 		for (cnt = 0; cnt < 5; cnt++) {
 			*bptr = ad->pong.ch_paddr[0];
@@ -1656,6 +1769,26 @@
 			vfe2x_ctrl->reconfig_vfe = 1;
 			return 0;
 		}
+		if (vfecmd.id == VFE_CMD_LIVESHOT) {
+			CDBG("live shot enabled\n");
+			spin_lock_irqsave(&vfe2x_ctrl->liveshot_enabled_lock,
+					flags);
+			vfe2x_ctrl->liveshot_enabled = 1;
+			spin_unlock_irqrestore(&vfe2x_ctrl->
+					liveshot_enabled_lock,
+					flags);
+			return 0;
+		}
+		if (vfecmd.id == VFE_CMD_STOP_LIVESHOT) {
+			CDBG("live shot disabled\n");
+			spin_lock_irqsave(&vfe2x_ctrl->liveshot_enabled_lock,
+					flags);
+			vfe2x_ctrl->liveshot_enabled = 0;
+			spin_unlock_irqrestore(
+					&vfe2x_ctrl->liveshot_enabled_lock,
+					flags);
+			return 0;
+		}
 		if (vfecmd.length > 256 - 4) {
 			cmd_data_alloc =
 			cmd_data = kmalloc(vfecmd.length + 4, GFP_ATOMIC);
@@ -2162,6 +2295,7 @@
 	spin_lock_init(&vfe2x_ctrl->sd_notify_lock);
 	spin_lock_init(&vfe2x_ctrl->table_lock);
 	spin_lock_init(&vfe2x_ctrl->vfe_msg_lock);
+	spin_lock_init(&vfe2x_ctrl->liveshot_enabled_lock);
 	init_waitqueue_head(&stopevent.wait);
 	INIT_LIST_HEAD(&vfe2x_ctrl->table_q);
 	INIT_LIST_HEAD(&vfe2x_ctrl->vfe_msg_q);
diff --git a/drivers/media/video/msm/vfe/msm_vfe7x27a_v4l2.h b/drivers/media/video/msm/vfe/msm_vfe7x27a_v4l2.h
index 39affc4..7693235 100644
--- a/drivers/media/video/msm/vfe/msm_vfe7x27a_v4l2.h
+++ b/drivers/media/video/msm/vfe/msm_vfe7x27a_v4l2.h
@@ -112,6 +112,9 @@
 	uint32_t stop_pending;
 	uint32_t update_pending;
 
+	spinlock_t liveshot_enabled_lock;
+	uint32_t liveshot_enabled;
+
 	/* v4l2 subdev */
 	struct v4l2_subdev subdev;
 	struct platform_device *pdev;
diff --git a/drivers/media/video/vcap_v4l2.c b/drivers/media/video/vcap_v4l2.c
index 894860b..e8d9e04 100644
--- a/drivers/media/video/vcap_v4l2.c
+++ b/drivers/media/video/vcap_v4l2.c
@@ -1135,7 +1135,7 @@
 			goto free_res;
 		}
 
-		rate = c_data->vc_format.clk_freq;
+		rate = c_data->vc_format.clk_freq / 100 * 102;
 		rate_rc = clk_round_rate(dev->vcap_clk, rate);
 		if (rate_rc <= 0) {
 			pr_err("%s: Failed core rnd_rate\n", __func__);
@@ -1251,7 +1251,7 @@
 			goto free_res;
 		}
 
-		rate = c_data->vc_format.clk_freq;
+		rate = c_data->vc_format.clk_freq / 100 * 102;
 		rate_rc = clk_round_rate(dev->vcap_clk, rate);
 		if (rate_rc <= 0) {
 			pr_err("%s: Failed core rnd_rate\n", __func__);
diff --git a/drivers/mfd/wcd9xxx-core.c b/drivers/mfd/wcd9xxx-core.c
index 90673fc..6d82e11 100644
--- a/drivers/mfd/wcd9xxx-core.c
+++ b/drivers/mfd/wcd9xxx-core.c
@@ -246,6 +246,12 @@
 	},
 };
 
+static struct mfd_cell taiko_devs[] = {
+	{
+		.name = "taiko_codec",
+	},
+};
+
 static void wcd9xxx_bring_up(struct wcd9xxx *wcd9xxx)
 {
 	wcd9xxx_reg_write(wcd9xxx, WCD9XXX_A_LEAKAGE_CTL, 0x4);
@@ -336,13 +342,16 @@
 	} else if (wcd9xxx->idbyte_0 == 0x1) {
 		wcd9xxx_dev = tabla1x_devs;
 		wcd9xxx_dev_size = ARRAY_SIZE(tabla1x_devs);
+	} else if (wcd9xxx->idbyte_0 == 0x0 && wcd9xxx->idbyte_1 == 0x0 &&
+		   wcd9xxx->idbyte_2 == 0x2 && wcd9xxx->idbyte_3 == 0x1) {
+		wcd9xxx_dev = taiko_devs;
+		wcd9xxx_dev_size = ARRAY_SIZE(taiko_devs);
 	} else if (wcd9xxx->idbyte_0 == 0x0) {
 		wcd9xxx_dev = sitar_devs;
 		wcd9xxx_dev_size = ARRAY_SIZE(sitar_devs);
 	}
-	ret = mfd_add_devices(wcd9xxx->dev, -1,
-		      wcd9xxx_dev, wcd9xxx_dev_size,
-		      NULL, 0);
+	ret = mfd_add_devices(wcd9xxx->dev, -1, wcd9xxx_dev, wcd9xxx_dev_size,
+			      NULL, 0);
 	if (ret != 0) {
 		dev_err(wcd9xxx->dev, "Failed to add children: %d\n", ret);
 		goto err_irq;
@@ -883,7 +892,9 @@
 		pr_err("%s: error, initializing device failed\n", __func__);
 		goto err_slim_add;
 	}
+
 	wcd9xxx_init_slimslave(wcd9xxx, wcd9xxx_pgd_la);
+
 #ifdef CONFIG_DEBUG_FS
 	debugCodec = wcd9xxx;
 
@@ -1091,6 +1102,23 @@
 	.suspend = wcd9xxx_slim_suspend,
 };
 
+static const struct slim_device_id taiko_slimtest_id[] = {
+	{"taiko-slim", 0},
+	{}
+};
+
+static struct slim_driver taiko_slim_driver = {
+	.driver = {
+		.name = "taiko-slim",
+		.owner = THIS_MODULE,
+	},
+	.probe = wcd9xxx_slim_probe,
+	.remove = wcd9xxx_slim_remove,
+	.id_table = taiko_slimtest_id,
+	.resume = wcd9xxx_slim_resume,
+	.suspend = wcd9xxx_slim_suspend,
+};
+
 #define WCD9XXX_I2C_TOP_LEVEL	0
 #define WCD9XXX_I2C_ANALOG	1
 #define WCD9XXX_I2C_DIGITAL_1	2
@@ -1119,7 +1147,7 @@
 
 static int __init wcd9xxx_init(void)
 {
-	int ret1, ret2, ret3, ret4, ret5;
+	int ret1, ret2, ret3, ret4, ret5, ret6;
 
 	ret1 = slim_driver_register(&tabla_slim_driver);
 	if (ret1 != 0)
@@ -1141,7 +1169,11 @@
 	if (ret5 != 0)
 		pr_err("Failed to register sitar SB driver: %d\n", ret5);
 
-	return (ret1 && ret2 && ret3 && ret4 && ret5) ? -1 : 0;
+	ret6 = slim_driver_register(&taiko_slim_driver);
+	if (ret6 != 0)
+		pr_err("Failed to register taiko SB driver: %d\n", ret6);
+
+	return (ret1 && ret2 && ret3 && ret4 && ret5 && ret6) ? -1 : 0;
 }
 module_init(wcd9xxx_init);
 
diff --git a/drivers/mfd/wcd9xxx-slimslave.c b/drivers/mfd/wcd9xxx-slimslave.c
index 789242d..5f839a8 100644
--- a/drivers/mfd/wcd9xxx-slimslave.c
+++ b/drivers/mfd/wcd9xxx-slimslave.c
@@ -12,6 +12,9 @@
 #include <linux/slab.h>
 #include <linux/mutex.h>
 #include <linux/mfd/wcd9xxx/wcd9xxx-slimslave.h>
+#include <linux/mfd/wcd9xxx/wcd9xxx_registers.h>
+
+#define WCD9XXX_CHIP_ID_TAIKO 0x00000201
 
 struct wcd9xxx_slim_sch_rx {
 	u32 sph;
@@ -30,37 +33,111 @@
 struct wcd9xxx_slim_sch {
 	struct wcd9xxx_slim_sch_rx rx[SLIM_MAX_RX_PORTS];
 	struct wcd9xxx_slim_sch_tx tx[SLIM_MAX_TX_PORTS];
+
+	u16 rx_port_start_offset;
+	u16 num_rx_slave_port;
+	u16 port_ch_0_start_port_id;
+	u16 port_ch_0_end_port_id;
+	u16 pgd_tx_port_ch_1_end_port_id;
+	u16 rx_port_ch_reg_base;
+	u16 port_tx_cfg_reg_base;
+	u16 port_rx_cfg_reg_base;
+	int number_of_tx_slave_dev_ports;
+	int number_of_rx_slave_dev_ports;
 };
 
 static struct wcd9xxx_slim_sch sh_ch;
 
 static int wcd9xxx_alloc_slim_sh_ch_rx(struct wcd9xxx *wcd9xxx,
-					u8 wcd9xxx_pgd_la);
+				       u8 wcd9xxx_pgd_la);
 static int wcd9xxx_alloc_slim_sh_ch_tx(struct wcd9xxx *wcd9xxx,
 					u8 wcd9xxx_pgd_la);
 static int wcd9xxx_dealloc_slim_sh_ch_rx(struct wcd9xxx *wcd9xxx);
 static int wcd9xxx_dealloc_slim_sh_ch_tx(struct wcd9xxx *wcd9xxx);
 
+static int wcd9xxx_configure_ports(struct wcd9xxx *wcd9xxx)
+{
+	int i;
+	u32 id;
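+	/* Read the four chip ID bytes and check for Taiko, which uses a
+	 * different slave port map than the Tabla/Sitar family */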
+	for (i = 0; i < 4; i++)
+		((u8 *)&id)[i] = wcd9xxx_reg_read(wcd9xxx,
+						  WCD9XXX_A_CHIP_ID_BYTE_0 + i);
+	id = cpu_to_be32(id);
+	pr_debug("%s: chip id 0x%08x\n", __func__, id);
+	if (id != WCD9XXX_CHIP_ID_TAIKO) {
+		sh_ch.rx_port_start_offset =
+		    TABLA_SB_PGD_OFFSET_OF_RX_SLAVE_DEV_PORTS;
+		sh_ch.num_rx_slave_port =
+		    TABLA_SB_PGD_MAX_NUMBER_OF_RX_SLAVE_DEV_PORTS;
+		sh_ch.port_ch_0_start_port_id =
+		    TABLA_SB_PGD_RX_PORT_MULTI_CHANNEL_0_START_PORT_ID;
+		sh_ch.port_ch_0_end_port_id =
+		    TABLA_SB_PGD_RX_PORT_MULTI_CHANNEL_0_END_PORT_ID;
+		sh_ch.pgd_tx_port_ch_1_end_port_id =
+		    TABLA_SB_PGD_TX_PORT_MULTI_CHANNEL_1_END_PORT_ID;
+
+		sh_ch.rx_port_ch_reg_base =
+		    0x180 + (TABLA_SB_PGD_OFFSET_OF_RX_SLAVE_DEV_PORTS * 4);
+		sh_ch.port_rx_cfg_reg_base =
+		    0x040 + (TABLA_SB_PGD_OFFSET_OF_RX_SLAVE_DEV_PORTS);
+		sh_ch.port_tx_cfg_reg_base = 0x040;
+
+		sh_ch.number_of_tx_slave_dev_ports =
+		    TABLA_SB_PGD_MAX_NUMBER_OF_TX_SLAVE_DEV_PORTS;
+		sh_ch.number_of_rx_slave_dev_ports =
+		    TABLA_SB_PGD_MAX_NUMBER_OF_RX_SLAVE_DEV_PORTS;
+	} else {
+		sh_ch.rx_port_start_offset =
+		    TAIKO_SB_PGD_OFFSET_OF_RX_SLAVE_DEV_PORTS;
+		sh_ch.num_rx_slave_port =
+		    TAIKO_SB_PGD_MAX_NUMBER_OF_RX_SLAVE_DEV_PORTS;
+		sh_ch.port_ch_0_start_port_id =
+		    TAIKO_SB_PGD_RX_PORT_MULTI_CHANNEL_0_START_PORT_ID;
+		sh_ch.port_ch_0_end_port_id =
+		    TAIKO_SB_PGD_RX_PORT_MULTI_CHANNEL_0_END_PORT_ID;
+		sh_ch.pgd_tx_port_ch_1_end_port_id =
+		    TAIKO_SB_PGD_TX_PORT_MULTI_CHANNEL_1_END_PORT_ID;
+
+		sh_ch.rx_port_ch_reg_base = 0x180;
+		sh_ch.port_rx_cfg_reg_base = 0x040;
+		sh_ch.port_tx_cfg_reg_base = 0x050;
+
+		sh_ch.number_of_tx_slave_dev_ports =
+		    TAIKO_SB_PGD_MAX_NUMBER_OF_TX_SLAVE_DEV_PORTS;
+		sh_ch.number_of_rx_slave_dev_ports =
+		    TAIKO_SB_PGD_MAX_NUMBER_OF_RX_SLAVE_DEV_PORTS;
+	}
+
+	return 0;
+}
+
 int wcd9xxx_init_slimslave(struct wcd9xxx *wcd9xxx, u8 wcd9xxx_pgd_la)
 {
 	int ret = 0;
 
+	ret = wcd9xxx_configure_ports(wcd9xxx);
+	if (ret) {
+		pr_err("%s: Failed to configure register address offset\n",
+		       __func__);
+		goto err;
+	}
+
 	ret = wcd9xxx_alloc_slim_sh_ch_rx(wcd9xxx, wcd9xxx_pgd_la);
 	if (ret) {
 		pr_err("%s: Failed to alloc rx slimbus shared channels\n",
-								__func__);
-		goto rx_err;
+		       __func__);
+		goto err;
 	}
 	ret = wcd9xxx_alloc_slim_sh_ch_tx(wcd9xxx, wcd9xxx_pgd_la);
 	if (ret) {
 		pr_err("%s: Failed to alloc tx slimbus shared channels\n",
-								__func__);
+		       __func__);
 		goto tx_err;
 	}
 	return 0;
 tx_err:
 	wcd9xxx_dealloc_slim_sh_ch_rx(wcd9xxx);
-rx_err:
+err:
 	return ret;
 }
 
@@ -82,23 +159,22 @@
 	return ret;
 }
 
-int wcd9xxx_get_channel(struct wcd9xxx *wcd9xxx,
-		unsigned int *rx_ch,
-		unsigned int *tx_ch)
+int wcd9xxx_get_channel(struct wcd9xxx *wcd9xxx, unsigned int *rx_ch,
+			unsigned int *tx_ch)
 {
 	int ch_idx = 0;
 	struct wcd9xxx_slim_sch_rx *rx = sh_ch.rx;
 	struct wcd9xxx_slim_sch_tx *tx = sh_ch.tx;
 
-	for (ch_idx = 0; ch_idx < SLIM_MAX_RX_PORTS; ch_idx++)
+	for (ch_idx = 0; ch_idx < sh_ch.number_of_rx_slave_dev_ports; ch_idx++)
 		rx_ch[ch_idx] = rx[ch_idx].ch_num;
-	for (ch_idx = 0; ch_idx < SLIM_MAX_TX_PORTS; ch_idx++)
+	for (ch_idx = 0; ch_idx < sh_ch.number_of_tx_slave_dev_ports; ch_idx++)
 		tx_ch[ch_idx] = tx[ch_idx].ch_num;
 	return 0;
 }
 
 static int wcd9xxx_alloc_slim_sh_ch_rx(struct wcd9xxx *wcd9xxx,
-			u8 wcd9xxx_pgd_la)
+				       u8 wcd9xxx_pgd_la)
 {
 	int ret = 0;
 	u8 ch_idx ;
@@ -109,35 +185,38 @@
 	 * DSP requires channel number to be between 128 and 255.
 	 */
 	pr_debug("%s: pgd_la[%d]\n", __func__, wcd9xxx_pgd_la);
-	for (ch_idx = 0; ch_idx < SLIM_MAX_RX_PORTS; ch_idx++) {
-		slave_port_id = (ch_idx + 1 +
-				SB_PGD_OFFSET_OF_RX_SLAVE_DEV_PORTS);
+	for (ch_idx = 0; ch_idx < sh_ch.number_of_rx_slave_dev_ports;
+	     ch_idx++) {
+		slave_port_id = (ch_idx + sh_ch.rx_port_start_offset);
 		rx[ch_idx].ch_num = slave_port_id + BASE_CH_NUM;
 		ret = slim_get_slaveport(wcd9xxx_pgd_la, slave_port_id,
 					&rx[ch_idx].sph, SLIM_SINK);
 		if (ret < 0) {
 			pr_err("%s: slave port failure id[%d] ret[%d]\n",
-					__func__, slave_port_id, ret);
+			       __func__, slave_port_id, ret);
 			goto err;
 		}
 
 		ret = slim_query_ch(wcd9xxx->slim, rx[ch_idx].ch_num,
-							&rx[ch_idx].ch_h);
+				    &rx[ch_idx].ch_h);
 		if (ret < 0) {
 			pr_err("%s: slim_query_ch failed ch-num[%d] ret[%d]\n",
-					__func__, rx[ch_idx].ch_num, ret);
+			       __func__, rx[ch_idx].ch_num, ret);
 			goto err;
 		}
+		pr_debug("%s:ch_num=%d ch_h=%d sph=%d la=%d slave_port_id %d\n",
+			 __func__, rx[ch_idx].ch_num, rx[ch_idx].ch_h,
+			 rx[ch_idx].sph, wcd9xxx_pgd_la, slave_port_id);
 	}
 err:
 	return ret;
 }
 
 static int wcd9xxx_alloc_slim_sh_ch_tx(struct wcd9xxx *wcd9xxx,
-			u8 wcd9xxx_pgd_la)
+				       u8 wcd9xxx_pgd_la)
 {
 	int ret = 0;
-	u8 ch_idx ;
+	u8 ch_idx;
 	struct wcd9xxx_slim_sch_tx *tx = sh_ch.tx;
 	u16 slave_port_id = 0;
 
@@ -146,21 +225,22 @@
 	 * use channel numbers from 138 to 144, for TX port
 	 * use channel numbers from 128 to 137
 	 */
-	for (ch_idx = 0; ch_idx < SLIM_MAX_TX_PORTS; ch_idx++) {
+	for (ch_idx = 0; ch_idx < sh_ch.number_of_tx_slave_dev_ports;
+	     ch_idx++) {
 		slave_port_id = ch_idx;
 		tx[ch_idx].ch_num = slave_port_id + BASE_CH_NUM;
 		ret = slim_get_slaveport(wcd9xxx_pgd_la, slave_port_id,
-					&tx[ch_idx].sph, SLIM_SRC);
+					 &tx[ch_idx].sph, SLIM_SRC);
 		if (ret < 0) {
 			pr_err("%s: slave port failure id[%d] ret[%d]\n",
-					__func__, slave_port_id, ret);
+			       __func__, slave_port_id, ret);
 			goto err;
 		}
 		ret = slim_query_ch(wcd9xxx->slim, tx[ch_idx].ch_num,
-							&tx[ch_idx].ch_h);
+				    &tx[ch_idx].ch_h);
 		if (ret < 0) {
 			pr_err("%s: slim_query_ch failed ch-num[%d] ret[%d]\n",
-					__func__, tx[ch_idx].ch_num, ret);
+			       __func__, tx[ch_idx].ch_num, ret);
 			goto err;
 		}
 	}
@@ -174,7 +254,7 @@
 	int ret = 0;
 	struct wcd9xxx_slim_sch_rx *rx = sh_ch.rx;
 	/* slim_dealloc_ch */
-	for (idx = 0; idx < SLIM_MAX_RX_PORTS; idx++) {
+	for (idx = 0; idx < sh_ch.number_of_rx_slave_dev_ports; idx++) {
 		ret = slim_dealloc_ch(wcd9xxx->slim, rx[idx].ch_h);
 		if (ret < 0) {
 			pr_err("%s: slim_dealloc_ch fail ret[%d] ch_h[%d]\n",
@@ -191,7 +271,7 @@
 	int ret = 0;
 	struct wcd9xxx_slim_sch_tx *tx = sh_ch.tx;
 	/* slim_dealloc_ch */
-	for (idx = 0; idx < SLIM_MAX_TX_PORTS; idx++) {
+	for (idx = 0; idx < sh_ch.number_of_tx_slave_dev_ports; idx++) {
 		ret = slim_dealloc_ch(wcd9xxx->slim, tx[idx].ch_h);
 		if (ret < 0) {
 			pr_err("%s: slim_dealloc_ch fail ret[%d] ch_h[%d]\n",
@@ -204,9 +284,9 @@
 
 /* Enable slimbus slave device for RX path */
 int wcd9xxx_cfg_slim_sch_rx(struct wcd9xxx *wcd9xxx, unsigned int *ch_num,
-				unsigned int ch_cnt, unsigned int rate)
+			    unsigned int ch_cnt, unsigned int rate)
 {
-	u8 i = 0;
+	u8 i;
 	u16 grph;
 	u32 sph[SLIM_MAX_RX_PORTS] = {0};
 	u16 ch_h[SLIM_MAX_RX_PORTS] = {0};
@@ -221,53 +301,56 @@
 	pr_debug("%s: ch_cnt[%d] rate=%d\n", __func__, ch_cnt, rate);
 
 	for (i = 0; i < ch_cnt; i++) {
-		idx = (ch_num[i] - BASE_CH_NUM -
-			SB_PGD_OFFSET_OF_RX_SLAVE_DEV_PORTS - 1);
+		idx = (ch_num[i] - BASE_CH_NUM - sh_ch.rx_port_start_offset);
 		ch_h[i] = rx[idx].ch_h;
 		sph[i] = rx[idx].sph;
-		slave_port_id = idx + 1;
-		if ((slave_port_id > SB_PGD_MAX_NUMBER_OF_RX_SLAVE_DEV_PORTS) ||
-			(slave_port_id == 0)) {
+		slave_port_id = idx;
+		pr_debug("%s: idx %d, ch_h %d, sph %d\n",
+			 __func__, idx, ch_h[i], sph[i]);
+		if ((slave_port_id > sh_ch.num_rx_slave_port)) {
 			pr_err("Slimbus: invalid slave port id: %d",
-							slave_port_id);
+			       slave_port_id);
 			ret = -EINVAL;
 			goto err;
 		}
-		slave_port_id += SB_PGD_OFFSET_OF_RX_SLAVE_DEV_PORTS;
+		slave_port_id += sh_ch.rx_port_start_offset;
+		pr_debug("%s: slave_port_id %d\n", __func__, slave_port_id);
 		/* look for the valid port range and chose the
 		 * payload accordingly
 		 */
-		if ((slave_port_id >
-				SB_PGD_TX_PORT_MULTI_CHANNEL_1_END_PORT_ID) &&
-			(slave_port_id <=
-			 SB_PGD_RX_PORT_MULTI_CHANNEL_0_END_PORT_ID)) {
-				payload_rx = payload_rx  |
-				(1 <<
-				(slave_port_id -
-				SB_PGD_RX_PORT_MULTI_CHANNEL_0_START_PORT_ID));
+		if ((slave_port_id > sh_ch.pgd_tx_port_ch_1_end_port_id) &&
+		    (slave_port_id <= sh_ch.port_ch_0_end_port_id)) {
+			payload_rx = payload_rx |
+				(1 << (slave_port_id -
+				      sh_ch.port_ch_0_start_port_id));
 		} else {
 			ret = -EINVAL;
 			goto err;
 		}
+
 		multi_chan_cfg_reg_addr =
-				SB_PGD_RX_PORT_MULTI_CHANNEL_0(slave_port_id);
+		    SB_PGD_RX_PORT_MULTI_CHANNEL_0(sh_ch.rx_port_ch_reg_base,
+						   idx);
+		pr_debug("%s: multi_chan_cfg_reg_addr 0x%x\n", __func__,
+			 multi_chan_cfg_reg_addr);
+
 		/* write to interface device */
 		ret = wcd9xxx_interface_reg_write(wcd9xxx,
-				multi_chan_cfg_reg_addr,
-				payload_rx);
+						  multi_chan_cfg_reg_addr,
+						  payload_rx);
 		if (ret < 0) {
 			pr_err("%s:Intf-dev fail reg[%d] payload[%d] ret[%d]\n",
-							__func__,
-							multi_chan_cfg_reg_addr,
-							payload_rx, ret);
+			       __func__, multi_chan_cfg_reg_addr,
+			       payload_rx, ret);
 			goto err;
 		}
 		/* configure the slave port for water mark and enable*/
 		wm_payload = (SLAVE_PORT_WATER_MARK_VALUE <<
-				SLAVE_PORT_WATER_MARK_SHIFT) +
-				SLAVE_PORT_ENABLE;
-		ret = wcd9xxx_interface_reg_write(wcd9xxx,
-				SB_PGD_PORT_CFG_BYTE_ADDR(slave_port_id),
+			      SLAVE_PORT_WATER_MARK_SHIFT) + SLAVE_PORT_ENABLE;
+		ret = wcd9xxx_interface_reg_write(
+				wcd9xxx,
+				SB_PGD_PORT_CFG_BYTE_ADDR(
+				    sh_ch.port_rx_cfg_reg_base, idx),
 				wm_payload);
 		if (ret < 0) {
 			pr_err("%s:watermark set failure for port[%d] ret[%d]",
@@ -283,16 +366,14 @@
 	prop.ratem = (rate/4000);
 	prop.sampleszbits = 16;
 
-	ret = slim_define_ch(wcd9xxx->slim, &prop, ch_h, ch_cnt,
-					true, &grph);
+	ret = slim_define_ch(wcd9xxx->slim, &prop, ch_h, ch_cnt, true, &grph);
 	if (ret < 0) {
 		pr_err("%s: slim_define_ch failed ret[%d]\n",
 					__func__, ret);
 		goto err;
 	}
 	for (i = 0; i < ch_cnt; i++) {
-		ret = slim_connect_sink(wcd9xxx->slim, &sph[i],
-							1, ch_h[i]);
+		ret = slim_connect_sink(wcd9xxx->slim, &sph[i], 1, ch_h[i]);
 		if (ret < 0) {
 			pr_err("%s: slim_connect_sink failed ret[%d]\n",
 						__func__, ret);
@@ -300,16 +381,14 @@
 		}
 	}
 	/* slim_control_ch */
-	ret = slim_control_ch(wcd9xxx->slim, grph, SLIM_CH_ACTIVATE,
-					true);
+	ret = slim_control_ch(wcd9xxx->slim, grph, SLIM_CH_ACTIVATE, true);
 	if (ret < 0) {
 		pr_err("%s: slim_control_ch failed ret[%d]\n",
 				__func__, ret);
 		goto err_close_slim_sch;
 	}
 	for (i = 0; i < ch_cnt; i++) {
-		idx = (ch_num[i] - BASE_CH_NUM -
-				SB_PGD_OFFSET_OF_RX_SLAVE_DEV_PORTS - 1);
+		idx = (ch_num[i] - BASE_CH_NUM - sh_ch.rx_port_start_offset);
 		rx[idx].grph = grph;
 	}
 	return 0;
@@ -324,7 +403,7 @@
 
 /* Enable slimbus slave device for TX path */
 int wcd9xxx_cfg_slim_sch_tx(struct wcd9xxx *wcd9xxx, unsigned int *ch_num,
-				unsigned int ch_cnt, unsigned int rate)
+			    unsigned int ch_cnt, unsigned int rate)
 {
 	u8 i = 0;
 	u8  payload_tx_0 = 0, payload_tx_1 = 0, wm_payload = 0;
@@ -333,7 +412,7 @@
 	u16 ch_h[SLIM_MAX_TX_PORTS] = {0};
 	u16 idx = 0, slave_port_id;
 	int ret = 0;
-	unsigned short  multi_chan_cfg_reg_addr;
+	unsigned short multi_chan_cfg_reg_addr;
 
 	struct wcd9xxx_slim_sch_tx *tx = sh_ch.tx;
 	struct slim_ch prop;
@@ -343,10 +422,12 @@
 		idx = (ch_num[i] - BASE_CH_NUM);
 		ch_h[i] = tx[idx].ch_h;
 		sph[i] = tx[idx].sph;
-		slave_port_id = idx ;
-		if (slave_port_id > SB_PGD_MAX_NUMBER_OF_TX_SLAVE_DEV_PORTS) {
+		slave_port_id = idx;
+		pr_debug("%s: idx %d, ch_h %d, sph %d, slave_port_id %d\n",
+			 __func__, idx, ch_h[i], sph[i], slave_port_id);
+		if (slave_port_id > sh_ch.number_of_tx_slave_dev_ports) {
 			pr_err("SLIMbus: invalid slave port id: %d",
-							slave_port_id);
+			       slave_port_id);
 			ret = -EINVAL;
 			goto err;
 		}
@@ -354,55 +435,60 @@
 		 *  payload accordingly
 		 */
 		if (slave_port_id <=
-			SB_PGD_TX_PORT_MULTI_CHANNEL_0_END_PORT_ID) {
+		    SB_PGD_TX_PORT_MULTI_CHANNEL_0_END_PORT_ID) {
 			payload_tx_0 = payload_tx_0 | (1 << slave_port_id);
 		} else if (slave_port_id <=
-				SB_PGD_TX_PORT_MULTI_CHANNEL_1_END_PORT_ID) {
-				payload_tx_1 = payload_tx_1 |
-				(1 <<
-				(slave_port_id -
-				SB_PGD_TX_PORT_MULTI_CHANNEL_1_START_PORT_ID));
+			   sh_ch.pgd_tx_port_ch_1_end_port_id) {
+			payload_tx_1 = payload_tx_1 |
+			    (1 << (slave_port_id -
+				 SB_PGD_TX_PORT_MULTI_CHANNEL_1_START_PORT_ID));
 		} else {
+			pr_err("%s: slave port id %d error\n", __func__,
+			       slave_port_id);
 			ret = -EINVAL;
 			goto err;
 		}
 		multi_chan_cfg_reg_addr =
-				SB_PGD_TX_PORT_MULTI_CHANNEL_0(slave_port_id);
+		    SB_PGD_TX_PORT_MULTI_CHANNEL_0(slave_port_id);
+		pr_debug("%s: multi_chan_cfg_reg_addr 0x%x\n", __func__,
+			 multi_chan_cfg_reg_addr);
 		/* write to interface device */
 		ret = wcd9xxx_interface_reg_write(wcd9xxx,
 				multi_chan_cfg_reg_addr,
 				payload_tx_0);
 		if (ret < 0) {
 			pr_err("%s:Intf-dev fail reg[%d] payload[%d] ret[%d]\n",
-								__func__,
-						multi_chan_cfg_reg_addr,
-						payload_tx_0, ret);
+			       __func__, multi_chan_cfg_reg_addr, payload_tx_0,
+			       ret);
 			goto err;
 		}
 		multi_chan_cfg_reg_addr =
-				SB_PGD_TX_PORT_MULTI_CHANNEL_1(slave_port_id);
+		    SB_PGD_TX_PORT_MULTI_CHANNEL_1(slave_port_id);
 		/* ports 8,9 */
 		ret = wcd9xxx_interface_reg_write(wcd9xxx,
-				multi_chan_cfg_reg_addr,
-				payload_tx_1);
+						  multi_chan_cfg_reg_addr,
+						  payload_tx_1);
 		if (ret < 0) {
 			pr_err("%s:Intf-dev fail reg[%d] payload[%d] ret[%d]\n",
-								__func__,
-						multi_chan_cfg_reg_addr,
-						payload_tx_1, ret);
+			       __func__, multi_chan_cfg_reg_addr,
+			       payload_tx_1, ret);
 			goto err;
 		}
 		/* configure the slave port for water mark and enable*/
 		wm_payload = (SLAVE_PORT_WATER_MARK_VALUE <<
-				SLAVE_PORT_WATER_MARK_SHIFT) +
-				SLAVE_PORT_ENABLE;
-		ret = wcd9xxx_interface_reg_write(wcd9xxx,
-				SB_PGD_PORT_CFG_BYTE_ADDR(slave_port_id),
-				wm_payload);
+			      SLAVE_PORT_WATER_MARK_SHIFT) + SLAVE_PORT_ENABLE;
+		pr_debug("%s: tx_cfg_reg 0x%x wm 0x%x\n", __func__,
+			 SB_PGD_PORT_CFG_BYTE_ADDR(sh_ch.port_tx_cfg_reg_base,
+						   slave_port_id), wm_payload);
+		ret = wcd9xxx_interface_reg_write(
+					wcd9xxx,
+					SB_PGD_PORT_CFG_BYTE_ADDR(
+					    sh_ch.port_tx_cfg_reg_base,
+					    slave_port_id),
+					wm_payload);
 		if (ret < 0) {
-			pr_err("%s:watermark set failure for port[%d] ret[%d]",
-						__func__,
-						slave_port_id, ret);
+			pr_err("%s: watermark set failure for port[%d] ret[%d]",
+			       __func__, slave_port_id, ret);
 		}
 	}
 
@@ -413,25 +499,21 @@
 	prop.auxf = SLIM_CH_AUXF_NOT_APPLICABLE;
 	prop.ratem = (rate/4000);
 	prop.sampleszbits = 16;
-	ret = slim_define_ch(wcd9xxx->slim, &prop, ch_h, ch_cnt,
-					true, &grph);
+	ret = slim_define_ch(wcd9xxx->slim, &prop, ch_h, ch_cnt, true, &grph);
 	if (ret < 0) {
-		pr_err("%s: slim_define_ch failed ret[%d]\n",
-					__func__, ret);
+		pr_err("%s: slim_define_ch failed ret[%d]\n", __func__, ret);
 		goto err;
 	}
 	for (i = 0; i < ch_cnt; i++) {
-		ret = slim_connect_src(wcd9xxx->slim, sph[i],
-							ch_h[i]);
+		ret = slim_connect_src(wcd9xxx->slim, sph[i], ch_h[i]);
 		if (ret < 0) {
 			pr_err("%s: slim_connect_src failed ret[%d]\n",
-						__func__, ret);
+			       __func__, ret);
 			goto err;
 		}
 	}
 	/* slim_control_ch */
-	ret = slim_control_ch(wcd9xxx->slim, grph, SLIM_CH_ACTIVATE,
-					true);
+	ret = slim_control_ch(wcd9xxx->slim, grph, SLIM_CH_ACTIVATE, true);
 	if (ret < 0) {
 		pr_err("%s: slim_control_ch failed ret[%d]\n",
 				__func__, ret);
@@ -460,34 +542,33 @@
 
 	pr_debug("%s: ch_cnt[%d]\n", __func__, ch_cnt);
 	for (i = 0; i < ch_cnt; i++) {
-		idx = (ch_num[i] - BASE_CH_NUM -
-			SB_PGD_OFFSET_OF_RX_SLAVE_DEV_PORTS - 1);
+		idx = (ch_num[i] - BASE_CH_NUM - sh_ch.rx_port_start_offset);
 		if (idx < 0) {
 			pr_err("%s: Error:-Invalid index found = %d\n",
-				__func__, idx);
+			       __func__, idx);
 			ret = -EINVAL;
 			goto err;
 		}
 		sph[i] = rx[idx].sph;
 		grph = rx[idx].grph;
+		pr_debug("%s: ch_num[%d] %d, idx %d, sph[%d] %x, grph %x\n",
+			 __func__, i, ch_num[i], idx, i, sph[i], grph);
 	}
 
 	/* slim_disconnect_port */
 	ret = slim_disconnect_ports(wcd9xxx->slim, sph, ch_cnt);
 	if (ret < 0) {
 		pr_err("%s: slim_disconnect_ports failed ret[%d]\n",
-				__func__, ret);
+		       __func__, ret);
 	}
 	/* slim_control_ch (REMOVE) */
 	ret = slim_control_ch(wcd9xxx->slim, grph, SLIM_CH_REMOVE, true);
 	if (ret < 0) {
-		pr_err("%s: slim_control_ch failed ret[%d]\n",
-				__func__, ret);
+		pr_err("%s: slim_control_ch failed ret[%d]\n", __func__, ret);
 		goto err;
 	}
 	for (i = 0; i < ch_cnt; i++) {
-		idx = (ch_num[i] - BASE_CH_NUM -
-				SB_PGD_OFFSET_OF_RX_SLAVE_DEV_PORTS - 1);
+		idx = (ch_num[i] - BASE_CH_NUM - sh_ch.rx_port_start_offset);
 		rx[idx].grph = 0;
 	}
 err:
@@ -496,7 +577,7 @@
 EXPORT_SYMBOL_GPL(wcd9xxx_close_slim_sch_rx);
 
 int wcd9xxx_close_slim_sch_tx(struct wcd9xxx *wcd9xxx, unsigned int *ch_num,
-				unsigned int ch_cnt)
+			      unsigned int ch_cnt)
 {
 	u16 grph = 0;
 	u32 sph[SLIM_MAX_TX_PORTS] = {0};
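The wcd9xxx-slimslave.c changes above derive every port offset from the chip ID read in wcd9xxx_configure_ports(). A small sketch of that byte-wise ID assembly; the register reader is abstracted behind a hypothetical callback (standing in for wcd9xxx_reg_read()), since only the pattern matters here:

#include <linux/types.h>
#include <asm/byteorder.h>

#define EXAMPLE_CHIP_ID_TAIKO	0x00000201

/*
 * Assemble a 32-bit chip ID from four consecutive byte registers, the way
 * wcd9xxx_configure_ports() does; byte 0 ends up as the most significant
 * byte of the returned value.
 */
static u32 example_read_chip_id(u8 (*read_byte)(u16 reg), u16 id_byte0_reg)
{
	u32 id;
	int i;

	for (i = 0; i < 4; i++)
		((u8 *)&id)[i] = read_byte(id_byte0_reg + i);

	return cpu_to_be32(id);
}

static bool example_is_taiko(u8 (*read_byte)(u16 reg), u16 id_byte0_reg)
{
	return example_read_chip_id(read_byte, id_byte0_reg) ==
	       EXAMPLE_CHIP_ID_TAIKO;
}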
diff --git a/drivers/misc/tsif.c b/drivers/misc/tsif.c
index aeda38c..7e59c98 100644
--- a/drivers/misc/tsif.c
+++ b/drivers/misc/tsif.c
@@ -169,6 +169,7 @@
 	dma_addr_t dmov_cmd_dma[2];
 	struct tsif_xfer xfer[2];
 	struct tasklet_struct dma_refill;
+	struct tasklet_struct clocks_off;
 	/* statistics */
 	u32 stat_rx;
 	u32 stat_overflow;
@@ -251,18 +252,24 @@
 {
 	if (on) {
 		if (tsif_device->tsif_clk)
-			clk_enable(tsif_device->tsif_clk);
+			clk_prepare_enable(tsif_device->tsif_clk);
 		if (tsif_device->tsif_pclk)
-			clk_enable(tsif_device->tsif_pclk);
-		clk_enable(tsif_device->tsif_ref_clk);
+			clk_prepare_enable(tsif_device->tsif_pclk);
+		clk_prepare_enable(tsif_device->tsif_ref_clk);
 	} else {
 		if (tsif_device->tsif_clk)
-			clk_disable(tsif_device->tsif_clk);
+			clk_disable_unprepare(tsif_device->tsif_clk);
 		if (tsif_device->tsif_pclk)
-			clk_disable(tsif_device->tsif_pclk);
-		clk_disable(tsif_device->tsif_ref_clk);
+			clk_disable_unprepare(tsif_device->tsif_pclk);
+		clk_disable_unprepare(tsif_device->tsif_ref_clk);
 	}
 }
+
+static void tsif_clocks_off(unsigned long data)
+{
+	struct msm_tsif_device *tsif_device = (struct msm_tsif_device *) data;
+	tsif_clock(tsif_device, 0);
+}
 /* ===clocks end=== */
 /* ===gpio begin=== */
 
@@ -605,17 +612,15 @@
 			if (tsif_device->state == tsif_state_running) {
 				tsif_stop_hw(tsif_device);
 				/*
-				 * Clocks _may_ be stopped right from IRQ
-				 * context. This is far from optimal w.r.t
-				 * latency.
-				 *
-				 * But, this branch taken only in case of
+				 * This branch is taken only in case of
 				 * severe hardware problem (I don't even know
-				 * what should happens for DMOV_RSLT_ERROR);
+				 * what should happen for DMOV_RSLT_ERROR);
 				 * thus I prefer code simplicity over
 				 * performance.
+				 * Clocks are turned off from outside the
+				 * interrupt context.
 				 */
-				tsif_clock(tsif_device, 0);
+				tasklet_schedule(&tsif_device->clocks_off);
 				tsif_device->state = tsif_state_flushing;
 			}
 		}
@@ -1313,6 +1318,8 @@
 	tsif_device->chunks_per_buf = TSIF_CHUNKS_IN_BUF_DEFAULT;
 	tasklet_init(&tsif_device->dma_refill, tsif_dma_refill,
 		     (unsigned long)tsif_device);
+	tasklet_init(&tsif_device->clocks_off, tsif_clocks_off,
+		     (unsigned long)tsif_device);
 	if (tsif_get_clocks(tsif_device))
 		goto err_clocks;
 /* map I/O memory */
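The tsif change above stops gating clocks straight from the DMA completion path and instead schedules a dedicated tasklet, mirroring the existing dma_refill bottom half. A minimal sketch of that deferral pattern with placeholder names; note that if the deferred work can sleep (clk_unprepare may on some platforms), a workqueue would be the safer bottom half:

#include <linux/interrupt.h>

struct example_dev {
	struct tasklet_struct clocks_off;
};

static void example_clock(struct example_dev *dev, int on)
{
	/* clk_prepare_enable()/clk_disable_unprepare() calls live here */
}

/* Tasklet body: runs in softirq context, after the hard IRQ has returned. */
static void example_clocks_off(unsigned long data)
{
	struct example_dev *dev = (struct example_dev *)data;

	example_clock(dev, 0);
}

static void example_init_bottom_half(struct example_dev *dev)
{
	tasklet_init(&dev->clocks_off, example_clocks_off,
		     (unsigned long)dev);
}

static irqreturn_t example_isr(int irq, void *data)
{
	struct example_dev *dev = data;

	/* defer the clock gating instead of doing it in the IRQ handler */
	tasklet_schedule(&dev->clocks_off);
	return IRQ_HANDLED;
}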
diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c
index d13b914..e4d0fc1 100644
--- a/drivers/mmc/core/core.c
+++ b/drivers/mmc/core/core.c
@@ -1988,11 +1988,15 @@
 {
 	u8 rst_n_function;
 
-	if (!mmc_card_mmc(card))
+	if (mmc_card_sdio(card))
 		return 0;
-	rst_n_function = card->ext_csd.rst_n_function;
-	if ((rst_n_function & EXT_CSD_RST_N_EN_MASK) != EXT_CSD_RST_N_ENABLED)
-		return 0;
+
+	if (mmc_card_mmc(card)) {
+		rst_n_function = card->ext_csd.rst_n_function;
+		if ((rst_n_function & EXT_CSD_RST_N_EN_MASK) !=
+		    EXT_CSD_RST_N_ENABLED)
+			return 0;
+	}
 	return 1;
 }
 EXPORT_SYMBOL(mmc_can_reset);
diff --git a/drivers/mmc/host/msm_sdcc.c b/drivers/mmc/host/msm_sdcc.c
index 36f87df..b22e2f0 100644
--- a/drivers/mmc/host/msm_sdcc.c
+++ b/drivers/mmc/host/msm_sdcc.c
@@ -2385,8 +2385,10 @@
 	struct msm_mmc_reg_data *vreg_table[2];
 
 	curr_slot = host->plat->vreg_data;
-	if (!curr_slot)
+	if (!curr_slot) {
+		rc = -EINVAL;
 		goto out;
+	}
 
 	vreg_table[0] = curr_slot->vdd_data;
 	vreg_table[1] = curr_slot->vdd_io_data;
@@ -3988,6 +3990,51 @@
 	return rc;
 }
 
+/*
+ * Work around the unavailability of a power_reset functionality in SD cards
+ * by turning the regulators supplying the SD card OFF and back ON.
+ */
+void msmsdcc_hw_reset(struct mmc_host *mmc)
+{
+	struct mmc_card *card = mmc->card;
+	struct msmsdcc_host *host = mmc_priv(mmc);
+	int rc;
+
+	/* Write-protection bits would be lost on a hardware reset in emmc */
+	if (!card || !mmc_card_sd(card))
+		return;
+
+	/*
+	 * Continuing after failing to disable the regulator would lead to a
+	 * panic anyway, since the commands would fail and the console would
+	 * be flooded with prints, eventually leading to a watchdog bark.
+	 */
+	rc = msmsdcc_setup_vreg(host, false, false);
+	if (rc) {
+		pr_err("%s: %s disable regulator: failed: %d\n",
+		       mmc_hostname(mmc), __func__, rc);
+		BUG_ON(rc);
+	}
+
+	/* 10ms delay for the supply to reach the desired voltage level */
+	usleep_range(10000, 12000);
+
+	/*
+	 * Continuing after failing to enable the regulator would lead to a
+	 * panic anyway, since the commands would fail and the console would
+	 * be flooded with prints, eventually leading to a watchdog bark.
+	 */
+	rc = msmsdcc_setup_vreg(host, true, false);
+	if (rc) {
+		pr_err("%s: %s enable regulator: failed: %d\n",
+		       mmc_hostname(mmc), __func__, rc);
+		BUG_ON(rc);
+	}
+
+	/* 10ms delay for the supply to reach the desired voltage level */
+	usleep_range(10000, 12000);
+}
+
 static const struct mmc_host_ops msmsdcc_ops = {
 	.enable		= msmsdcc_enable,
 	.disable	= msmsdcc_disable,
@@ -3998,7 +4045,8 @@
 	.get_ro		= msmsdcc_get_ro,
 	.enable_sdio_irq = msmsdcc_enable_sdio_irq,
 	.start_signal_voltage_switch = msmsdcc_switch_io_voltage,
-	.execute_tuning = msmsdcc_execute_tuning
+	.execute_tuning = msmsdcc_execute_tuning,
+	.hw_reset = msmsdcc_hw_reset,
 };
 
 static unsigned int
@@ -5361,7 +5409,6 @@
 	struct resource *dmares = NULL;
 	struct resource *dma_crci_res = NULL;
 	int ret = 0;
-	int i;
 
 	if (pdev->dev.of_node) {
 		plat = msmsdcc_populate_pdata(&pdev->dev);
@@ -5390,56 +5437,21 @@
 		pr_err("%s: Invalid resource\n", __func__);
 		return -ENXIO;
 	}
-	if (pdev->dev.of_node) {
-		/*
-		 * Device tree iomem resources are only accessible by index.
-		 * index = 0 -> SDCC register interface
-		 * index = 1 -> DML register interface
-		 * index = 2 -> BAM register interface
-		 * IRQ resources:
-		 * index = 0 -> SDCC IRQ
-		 * index = 1 -> BAM IRQ
-		 */
-		core_memres = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-		dml_memres = platform_get_resource(pdev, IORESOURCE_MEM, 1);
-		bam_memres = platform_get_resource(pdev, IORESOURCE_MEM, 2);
-		core_irqres = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
-		bam_irqres = platform_get_resource(pdev, IORESOURCE_IRQ, 1);
-	} else {
-		for (i = 0; i < pdev->num_resources; i++) {
-			if (pdev->resource[i].flags & IORESOURCE_MEM) {
-				if (!strncmp(pdev->resource[i].name,
-						"sdcc_dml_addr",
-						sizeof("sdcc_dml_addr")))
-					dml_memres = &pdev->resource[i];
-				else if (!strncmp(pdev->resource[i].name,
-						"sdcc_bam_addr",
-						sizeof("sdcc_bam_addr")))
-					bam_memres = &pdev->resource[i];
-				else
-					core_memres = &pdev->resource[i];
 
-			}
-			if (pdev->resource[i].flags & IORESOURCE_IRQ) {
-				if (!strncmp(pdev->resource[i].name,
-						"sdcc_bam_irq",
-						sizeof("sdcc_bam_irq")))
-					bam_irqres = &pdev->resource[i];
-				else
-					core_irqres = &pdev->resource[i];
-			}
-			if (pdev->resource[i].flags & IORESOURCE_DMA) {
-				if (!strncmp(pdev->resource[i].name,
-						"sdcc_dma_chnl",
-						sizeof("sdcc_dma_chnl")))
-					dmares = &pdev->resource[i];
-				else if (!strncmp(pdev->resource[i].name,
-						"sdcc_dma_crci",
-						sizeof("sdcc_dma_crci")))
-					dma_crci_res = &pdev->resource[i];
-			}
-		}
-	}
+	core_memres = platform_get_resource_byname(pdev,
+			IORESOURCE_MEM, "core_mem");
+	bam_memres = platform_get_resource_byname(pdev,
+			IORESOURCE_MEM, "bam_mem");
+	dml_memres = platform_get_resource_byname(pdev,
+			IORESOURCE_MEM, "dml_mem");
+	core_irqres = platform_get_resource_byname(pdev,
+			IORESOURCE_IRQ, "core_irq");
+	bam_irqres = platform_get_resource_byname(pdev,
+			IORESOURCE_IRQ, "bam_irq");
+	dmares = platform_get_resource_byname(pdev,
+			IORESOURCE_DMA, "dma_chnl");
+	dma_crci_res = platform_get_resource_byname(pdev,
+			IORESOURCE_DMA, "dma_crci");
 
 	if (!core_irqres || !core_memres) {
 		pr_err("%s: Invalid sdcc core resource\n", __func__);
@@ -5631,7 +5643,7 @@
 	mmc->caps |= plat->mmc_bus_width;
 	mmc->caps |= MMC_CAP_MMC_HIGHSPEED | MMC_CAP_SD_HIGHSPEED;
 	mmc->caps |= MMC_CAP_WAIT_WHILE_BUSY | MMC_CAP_ERASE;
-
+	mmc->caps |= MMC_CAP_HW_RESET;
 	/*
 	 * If we send the CMD23 before multi block write/read command
 	 * then we need not to send CMD12 at the end of the transfer.
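The msm_sdcc probe cleanup above replaces the hand-rolled resource loop with named lookups. A sketch of that lookup style, assuming the same resource names the patch uses ("core_mem", "bam_mem", "core_irq"); a missing entry simply comes back NULL and can be treated as optional:

#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/platform_device.h>

static int example_get_resources(struct platform_device *pdev)
{
	struct resource *core_mem, *bam_mem, *core_irq;

	core_mem = platform_get_resource_byname(pdev, IORESOURCE_MEM,
						"core_mem");
	core_irq = platform_get_resource_byname(pdev, IORESOURCE_IRQ,
						"core_irq");
	if (!core_mem || !core_irq)
		return -ENXIO;		/* mandatory resources */

	/* optional: only some SDCC instances expose a BAM */
	bam_mem = platform_get_resource_byname(pdev, IORESOURCE_MEM,
					       "bam_mem");
	if (!bam_mem)
		dev_dbg(&pdev->dev, "no BAM resource, using legacy DMA/PIO\n");

	return 0;
}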
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index cbefe67..3c79917 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -900,8 +900,8 @@
 {
 	struct virtnet_info *vi = netdev_priv(dev);
 
-	ring->rx_max_pending = virtqueue_get_vring_size(vi->rvq);
-	ring->tx_max_pending = virtqueue_get_vring_size(vi->svq);
+	ring->rx_max_pending = virtqueue_get_impl_size(vi->rvq);
+	ring->tx_max_pending = virtqueue_get_impl_size(vi->svq);
 	ring->rx_pending = ring->rx_max_pending;
 	ring->tx_pending = ring->tx_max_pending;
 
diff --git a/drivers/net/wireless/wcnss/wcnss_wlan.c b/drivers/net/wireless/wcnss/wcnss_wlan.c
index 7695778..c0a4e0e 100644
--- a/drivers/net/wireless/wcnss/wcnss_wlan.c
+++ b/drivers/net/wireless/wcnss/wcnss_wlan.c
@@ -21,6 +21,7 @@
 #include <linux/workqueue.h>
 #include <linux/jiffies.h>
 #include <linux/gpio.h>
+#include <linux/wakelock.h>
 #include <mach/peripheral-loader.h>
 
 #define DEVICE "wcnss_wlan"
@@ -48,6 +49,7 @@
 	void		(*tm_notify)(struct device *, int);
 	struct wcnss_wlan_config wlan_config;
 	struct delayed_work wcnss_work;
+	struct wake_lock wcnss_wake_lock;
 } *penv = NULL;
 
 static ssize_t wcnss_serial_number_show(struct device *dev,
@@ -60,7 +62,7 @@
 }
 
 static ssize_t wcnss_serial_number_store(struct device *dev,
-		struct device_attribute *attr, const char * buf, size_t count)
+		struct device_attribute *attr, const char *buf, size_t count)
 {
 	unsigned int value;
 
@@ -88,7 +90,7 @@
 }
 
 static ssize_t wcnss_thermal_mitigation_store(struct device *dev,
-		struct device_attribute *attr, const char * buf, size_t count)
+		struct device_attribute *attr, const char *buf, size_t count)
 {
 	int value;
 
@@ -326,6 +328,20 @@
 	return 0;
 }
 
+void wcnss_prevent_suspend(void)
+{
+	if (penv)
+		wake_lock(&penv->wcnss_wake_lock);
+}
+EXPORT_SYMBOL(wcnss_prevent_suspend);
+
+void wcnss_allow_suspend(void)
+{
+	if (penv)
+		wake_unlock(&penv->wcnss_wake_lock);
+}
+EXPORT_SYMBOL(wcnss_allow_suspend);
+
 static int
 wcnss_trigger_config(struct platform_device *pdev)
 {
@@ -398,6 +414,8 @@
 	if (ret)
 		goto fail_sysfs;
 
+	wake_lock_init(&penv->wcnss_wake_lock, WAKE_LOCK_SUSPEND, "wcnss");
+
 	return 0;
 
 fail_sysfs:
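The wcnss change above adds one suspend wakelock plus exported helpers so the WLAN driver can hold off suspend around critical work. A compact sketch of that pattern under the Android wakelock API present in msm-3.4; the destroy call is shown for completeness even though the matching teardown is not part of this hunk:

#include <linux/wakelock.h>

static struct wake_lock example_wake_lock;

static void example_wakelock_setup(void)
{
	wake_lock_init(&example_wake_lock, WAKE_LOCK_SUSPEND, "example");
}

/* Hold the wakelock for the duration of work that must not race suspend. */
static void example_do_critical_work(void)
{
	wake_lock(&example_wake_lock);
	/* ... handle the event ... */
	wake_unlock(&example_wake_lock);
}

static void example_wakelock_teardown(void)
{
	wake_lock_destroy(&example_wake_lock);
}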
diff --git a/drivers/of/of_slimbus.c b/drivers/of/of_slimbus.c
index 512ca73..8aaef25 100644
--- a/drivers/of/of_slimbus.c
+++ b/drivers/of/of_slimbus.c
@@ -66,6 +66,8 @@
 			kfree(slim);
 			return -ENOMEM;
 		}
+
+		slim->dev.of_node = of_node_get(node);
 		slim->name = (const char *)name;
 		binfo[n].bus_num = ctrl->nr;
 		binfo[n].slim_slave = slim;
diff --git a/drivers/platform/msm/qpnp-pwm.c b/drivers/platform/msm/qpnp-pwm.c
index 708d658..6f9af36 100644
--- a/drivers/platform/msm/qpnp-pwm.c
+++ b/drivers/platform/msm/qpnp-pwm.c
@@ -1,4 +1,5 @@
 /* Copyright (c) 2012, Code Aurora Forum. All rights reserved.
+
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -27,6 +28,8 @@
 #include <linux/qpnp/pwm.h>
 
 #define QPNP_LPG_DRIVER_NAME	"qcom,qpnp-pwm"
+#define QPNP_LPG_CHANNEL_BASE	"qpnp-lpg-channel-base"
+#define QPNP_LPG_LUT_BASE	"qpnp-lpg-lut-base"
 
 /* LPG Control for LPG_PATTERN_CONFIG */
 #define QPNP_RAMP_DIRECTION_SHIFT	4
@@ -207,26 +210,19 @@
 
 static RADIX_TREE(lpg_dev_tree, GFP_KERNEL);
 
-struct qpnp_lut_default_config {
-	u32		*duty_pct_list;
-	int		size;
-	int		start_idx;
-};
-
 struct qpnp_lut_config {
-	struct qpnp_lut_default_config def_config;
-	u8		*duty_pct_list;
-	int		list_size;
-	int		lo_index;
-	int		hi_index;
-	int		lut_pause_hi_cnt;
-	int		lut_pause_lo_cnt;
-	int		ramp_step_ms;
-	bool		ramp_direction;
-	bool		pattern_repeat;
-	bool		ramp_toggle;
-	bool		enable_pause_hi;
-	bool		enable_pause_lo;
+	u8	*duty_pct_list;
+	int	list_len;
+	int	lo_index;
+	int	hi_index;
+	int	lut_pause_hi_cnt;
+	int	lut_pause_lo_cnt;
+	int	ramp_step_ms;
+	bool	ramp_direction;
+	bool	pattern_repeat;
+	bool	ramp_toggle;
+	bool	enable_pause_hi;
+	bool	enable_pause_lo;
 };
 
 struct qpnp_lpg_config {
@@ -234,8 +230,6 @@
 	u16			base_addr;
 	u16			lut_base_addr;
 	u16			lut_size;
-	bool			bypass_lut;
-	bool			lpg_configured;
 };
 
 struct qpnp_pwm_config {
@@ -304,6 +298,8 @@
 
 #define QPNP_ENABLE_LUT_CONTROL(p_val)	qpnp_set_control(p_val, 1, 1, 1, 0, 1)
 #define QPNP_ENABLE_PWM_CONTROL(p_val)	qpnp_set_control(p_val, 1, 1, 0, 1, 0)
+#define QPNP_IS_PWM_CONFIG_SELECTED(val) (val & QPNP_PWM_SRC_SELECT_MASK)
+
 
 static inline void qpnp_convert_to_lut_flags(int *flags,
 				struct qpnp_lut_config *l_config)
@@ -316,10 +312,10 @@
 }
 
 static inline void qpnp_set_lut_params(struct lut_params *l_params,
-				struct qpnp_lut_config *l_config)
+		struct qpnp_lut_config *l_config, int s_idx, int size)
 {
-	l_params->start_idx = l_config->def_config.start_idx;
-	l_params->idx_len = l_config->def_config.size;
+	l_params->start_idx = s_idx;
+	l_params->idx_len = size;
 	l_params->lut_pause_hi = l_config->lut_pause_hi_cnt;
 	l_params->lut_pause_lo = l_config->lut_pause_lo_cnt;
 	l_params->ramp_step_ms = l_config->ramp_step_ms;
@@ -442,7 +438,7 @@
 	struct qpnp_lut_config	*lut = &chip->lpg_config.lut_config;
 	int			i, pwm_size, rc = 0;
 	int			burst_size = SPMI_MAX_BUF_LEN;
-	int			list_len = lut->list_size << 1;
+	int			list_len = lut->list_len << 1;
 	int			offset = lut->lo_index << 2;
 
 	pwm_size = QPNP_GET_PWM_SIZE(
@@ -451,15 +447,15 @@
 
 	max_pwm_value = (1 << pwm_size) - 1;
 
-	if (unlikely(lut->list_size != (lut->hi_index - lut->lo_index + 1))) {
+	if (unlikely(lut->list_len != (lut->hi_index - lut->lo_index + 1))) {
 		pr_err("LUT internal Data structure corruption detected\n");
-		pr_err("LUT list size: %d\n", lut->list_size);
+		pr_err("LUT list size: %d\n", lut->list_len);
 		pr_err("However, index size is: %d\n",
 				(lut->hi_index - lut->lo_index + 1));
 		return -EINVAL;
 	}
 
-	for (i = 0; i <= lut->list_size; i++) {
+	for (i = 0; i <= lut->list_len; i++) {
 		if (raw_value)
 			pwm_value = duty_pct[i];
 		else
@@ -597,7 +593,7 @@
 		lpg_config->base_addr, QPNP_LPG_PWM_TYPE_CONFIG, 1, chip);
 }
 
-static int qpnp_pwm_configure_control(struct pwm_device *pwm)
+static int qpnp_configure_pwm_control(struct pwm_device *pwm)
 {
 	struct qpnp_lpg_config	*lpg_config = &pwm->chip->lpg_config;
 	struct qpnp_lpg_chip	*chip = pwm->chip;
@@ -615,7 +611,7 @@
 
 }
 
-static int qpnp_lpg_configure_control(struct pwm_device *pwm)
+static int qpnp_configure_lpg_control(struct pwm_device *pwm)
 {
 	struct qpnp_lpg_config	*lpg_config = &pwm->chip->lpg_config;
 	struct qpnp_lpg_chip	*chip = pwm->chip;
@@ -789,7 +785,7 @@
 		pr_err("Failed to configure LUT pattern");
 		return rc;
 	}
-	rc = qpnp_lpg_configure_control(pwm);
+	rc = qpnp_configure_lpg_control(pwm);
 	if (rc) {
 		pr_err("Failed to configure pause registers");
 		return rc;
@@ -829,7 +825,7 @@
 		lpg_config->base_addr, QPNP_RAMP_CONTROL, 1, chip);
 }
 
-static int qpnp_lpg_disable_lut(struct pwm_device *pwm)
+static int qpnp_disable_lut(struct pwm_device *pwm)
 {
 	struct qpnp_lpg_config	*lpg_config = &pwm->chip->lpg_config;
 	struct qpnp_lpg_chip	*chip = pwm->chip;
@@ -863,7 +859,7 @@
 		lpg_config->base_addr, QPNP_RAMP_CONTROL, 1, chip);
 }
 
-static int qpnp_lpg_disable_pwm(struct pwm_device *pwm)
+static int qpnp_disable_pwm(struct pwm_device *pwm)
 {
 	struct qpnp_lpg_config	*lpg_config = &pwm->chip->lpg_config;
 	struct qpnp_lpg_chip	*chip = pwm->chip;
@@ -914,15 +910,13 @@
 		return rc;
 	}
 
-	rc = qpnp_pwm_configure_control(pwm);
+	rc = qpnp_configure_pwm_control(pwm);
 	if (rc) {
 		pr_err("Could not update PWM control for");
 		pr_err("channel %d rc=%d\n", pwm_config->channel_id, rc);
 		return rc;
 	}
 
-	pwm->chip->lpg_config.lpg_configured = 1;
-
 	pr_debug("duty/period=%u/%u usec: pwm_value=%d (of %d)\n",
 		 (unsigned)duty_us, (unsigned)period_us,
 		 pwm_config->pwm_value, 1 << period->pwm_size);
@@ -935,8 +929,6 @@
 {
 	struct qpnp_lpg_config		*lpg_config;
 	struct qpnp_lut_config		*lut_config;
-	struct qpnp_lut_default_config  *def_lut_config =
-					&lut_config->def_config;
 	struct pwm_period_config	*period;
 	struct qpnp_pwm_config		*pwm_config;
 	int				start_idx = lut_params.start_idx;
@@ -948,23 +940,6 @@
 	pwm_config = &pwm->pwm_config;
 	lpg_config = &pwm->chip->lpg_config;
 	lut_config = &lpg_config->lut_config;
-	def_lut_config = &lut_config->def_config;
-
-	if ((start_idx + len) > lpg_config->lut_size) {
-		pr_err("Exceed LUT limit\n");
-		return -EINVAL;
-	}
-	if ((unsigned)period_us > PM_PWM_PERIOD_MAX ||
-		(unsigned)period_us < PM_PWM_PERIOD_MIN) {
-		pr_err("Period out of range\n");
-		return -EINVAL;
-	}
-
-	if (!pwm_config->in_use) {
-		pr_err("channel_id: %d: stale handle?\n",
-				pwm_config->channel_id);
-		return -EINVAL;
-	}
 
 	period = &pwm_config->period;
 
@@ -981,37 +956,10 @@
 	if (flags & PM_PWM_LUT_USE_RAW_VALUE)
 		raw_lut = 1;
 
-	lut_config->list_size = len;
+	lut_config->list_len = len;
 	lut_config->lo_index = start_idx;
 	lut_config->hi_index = start_idx + len - 1;
 
-	/*
-	 * LUT may not be specified in device tree by default.
-	 * This is the first time user is configuring it.
-	 */
-	if (lpg_config->bypass_lut) {
-		def_lut_config->duty_pct_list = kzalloc(sizeof(u32) *
-							len, GFP_KERNEL);
-		if (!def_lut_config->duty_pct_list) {
-			pr_err("kzalloc failed on def_duty_pct_list\n");
-			return -ENOMEM;
-		}
-
-		lut_config->duty_pct_list = kzalloc(lpg_config->lut_size *
-						sizeof(u16), GFP_KERNEL);
-		if (!lut_config->duty_pct_list) {
-			pr_err("kzalloc failed on duty_pct_list\n");
-			kfree(def_lut_config->duty_pct_list);
-			return -ENOMEM;
-		}
-
-		def_lut_config->size = len;
-		def_lut_config->start_idx = start_idx;
-		memcpy(def_lut_config->duty_pct_list, duty_pct, len);
-
-		lpg_config->bypass_lut = 0;
-	}
-
 	rc = qpnp_lpg_change_table(pwm, duty_pct, raw_lut);
 	if (rc) {
 		pr_err("qpnp_lpg_change_table: rc=%d\n", rc);
@@ -1041,12 +989,28 @@
 	lut_config->ramp_toggle	    = !!(flags & PM_PWM_LUT_REVERSE);
 	lut_config->enable_pause_hi = !!(flags & PM_PWM_LUT_PAUSE_HI_EN);
 	lut_config->enable_pause_lo = !!(flags & PM_PWM_LUT_PAUSE_LO_EN);
-	lpg_config->bypass_lut = 0;
 
 	rc = qpnp_lpg_change_lut(pwm);
 
-	if (!rc)
-		lpg_config->lpg_configured = 1;
+	return rc;
+}
+
+static int _pwm_enable(struct pwm_device *pwm)
+{
+	int rc;
+	struct qpnp_lpg_chip *chip;
+
+	chip = pwm->chip;
+
+	mutex_lock(&pwm->chip->lpg_mutex);
+
+	if (QPNP_IS_PWM_CONFIG_SELECTED(
+		chip->qpnp_lpg_registers[QPNP_ENABLE_CONTROL]))
+		rc = qpnp_lpg_enable_pwm(pwm);
+	else
+		rc = qpnp_lpg_enable_lut(pwm);
+
+	mutex_unlock(&pwm->chip->lpg_mutex);
 
 	return rc;
 }
@@ -1108,11 +1072,10 @@
 	pwm_config = &pwm->pwm_config;
 
 	if (pwm_config->in_use) {
-		qpnp_lpg_disable_pwm(pwm);
-		qpnp_lpg_disable_lut(pwm);
+		qpnp_disable_pwm(pwm);
+		qpnp_disable_lut(pwm);
 		pwm_config->in_use = 0;
 		pwm_config->lable = NULL;
-		pwm->chip->lpg_config.lpg_configured = 0;
 	}
 
 	mutex_unlock(&pwm->chip->lpg_mutex);
@@ -1155,43 +1118,20 @@
 int pwm_enable(struct pwm_device *pwm)
 {
 	struct qpnp_pwm_config	*p_config;
-	struct qpnp_lpg_chip	*chip;
-	int			rc = 0;
 
 	if (pwm == NULL || IS_ERR(pwm) || pwm->chip == NULL) {
 		pr_err("Invalid pwm handle or no pwm_chip\n");
 		return -EINVAL;
 	}
 
-	mutex_lock(&pwm->chip->lpg_mutex);
-
-	chip = pwm->chip;
 	p_config = &pwm->pwm_config;
 
 	if (!p_config->in_use) {
 		pr_err("channel_id: %d: stale handle?\n", p_config->channel_id);
-		rc = -EINVAL;
-		goto out_unlock;
+		return -EINVAL;
 	}
 
-	if (!pwm->chip->lpg_config.lpg_configured) {
-		pr_err("Request received to enable PWM for channel Id: %d\n",
-							p_config->channel_id);
-		pr_err("However, PWM isn't configured\n");
-		pr_err("falling back to defaultconfiguration\n");
-		rc = _pwm_config(pwm, p_config->pwm_duty,
-					p_config->pwm_period);
-		if (rc) {
-			pr_err("Could not apply default PWM config\n");
-			goto out_unlock;
-		}
-	}
-
-	rc = qpnp_lpg_enable_pwm(pwm);
-
-out_unlock:
-	mutex_unlock(&pwm->chip->lpg_mutex);
-	return rc;
+	return _pwm_enable(pwm);
 }
 EXPORT_SYMBOL_GPL(pwm_enable);
 
@@ -1215,21 +1155,50 @@
 	pwm_config = &pwm->pwm_config;
 
 	if (pwm_config->in_use) {
-		if (!pwm->chip->lpg_config.lpg_configured) {
-			pr_err("Request received to disable PWM for\n");
-			pr_err("channel Id: %d\n", pwm_config->channel_id);
-			pr_err("However PWM is not configured by any means\n");
-			goto out_unlock;
-		}
-		qpnp_lpg_disable_pwm(pwm);
+		if (QPNP_IS_PWM_CONFIG_SELECTED(
+			chip->qpnp_lpg_registers[QPNP_ENABLE_CONTROL]))
+			qpnp_disable_pwm(pwm);
+		else
+			qpnp_disable_lut(pwm);
 	}
 
-out_unlock:
 	mutex_unlock(&pwm->chip->lpg_mutex);
 }
 EXPORT_SYMBOL_GPL(pwm_disable);
 
 /**
+ * pwm_change_mode - Change the PWM mode configuration
+ * @pwm: the PWM device
+ * @mode: Mode selection value
+ */
+int pwm_change_mode(struct pwm_device *pwm, enum pm_pwm_mode mode)
+{
+	int rc;
+
+	if (pwm == NULL || IS_ERR(pwm) || pwm->chip == NULL) {
+		pr_err("Invalid pwm handle or no pwm_chip\n");
+		return -EINVAL;
+	}
+
+	if (mode < PM_PWM_MODE_PWM || mode > PM_PWM_MODE_LPG) {
+		pr_err("Invalid mode value\n");
+		return -EINVAL;
+	}
+
+	mutex_lock(&pwm->chip->lpg_mutex);
+
+	if (mode)
+		rc = qpnp_configure_lpg_control(pwm);
+	else
+		rc = qpnp_configure_pwm_control(pwm);
+
+	mutex_unlock(&pwm->chip->lpg_mutex);
+
+	return rc;
+}
+EXPORT_SYMBOL_GPL(pwm_change_mode);
+
+/**
  * pwm_config_period - change PWM period
  *
  * @pwm: the PWM device
@@ -1356,11 +1325,29 @@
 	if (pwm->chip == NULL)
 		return -ENODEV;
 
+	if (!pwm->pwm_config.in_use) {
+		pr_err("channel_id: %d: stale handle?\n",
+				pwm->pwm_config.channel_id);
+		return -EINVAL;
+	}
+
 	if (duty_pct == NULL && !(lut_params.flags & PM_PWM_LUT_NO_TABLE)) {
 		pr_err("Invalid duty_pct with flag\n");
 		return -EINVAL;
 	}
 
+	if ((lut_params.start_idx + lut_params.idx_len) >
+				pwm->chip->lpg_config.lut_size) {
+		pr_err("Exceed LUT limit\n");
+		return -EINVAL;
+	}
+
+	if ((unsigned)period_us > PM_PWM_PERIOD_MAX ||
+		(unsigned)period_us < PM_PWM_PERIOD_MIN) {
+		pr_err("Period out of range\n");
+		return -EINVAL;
+	}
+
 	mutex_lock(&pwm->chip->lpg_mutex);
 
 	rc = _pwm_lut_config(pwm, period_us, duty_pct, lut_params);
@@ -1371,87 +1358,136 @@
 }
 EXPORT_SYMBOL_GPL(pwm_lut_config);
 
-/**
- * pwm_lut_enable - control a PWM device to start/stop LUT ramp
- * @pwm: the PWM device
- * @start: to start (1), or stop (0)
- */
-int pwm_lut_enable(struct pwm_device *pwm, int start)
+static int qpnp_parse_pwm_dt_config(struct device_node *of_pwm_node,
+		struct device_node *of_parent, struct qpnp_lpg_chip *chip)
 {
-	struct qpnp_lpg_config	*lpg_config;
-	struct qpnp_pwm_config	*p_config;
-	struct lut_params	lut_params;
-	int			rc = 0;
+	int rc, period;
+	struct pwm_device *pwm_dev = &chip->pwm_dev;
 
-	if (pwm == NULL || IS_ERR(pwm)) {
-		pr_err("Invalid pwm handle\n");
+	rc = of_property_read_u32(of_parent, "qcom,period", (u32 *)&period);
+	if (rc) {
+		pr_err("node is missing PWM Period prop");
+		return rc;
+	}
+
+	rc = of_property_read_u32(of_pwm_node, "qcom,duty",
+				&pwm_dev->pwm_config.pwm_duty);
+	if (rc) {
+		pr_err("node is missing PWM Duty prop");
+		return rc;
+	}
+
+	rc = _pwm_config(pwm_dev, pwm_dev->pwm_config.pwm_duty, period);
+
+	return rc;
+}
+
+#define qpnp_check_optional_dt_bindings(func)	\
+do {					\
+	rc = func;			\
+	if (rc && rc != -EINVAL)	\
+		goto out;		\
+	rc = 0;				\
+} while (0)
+
+static int qpnp_parse_lpg_dt_config(struct device_node *of_lpg_node,
+		struct device_node *of_parent, struct qpnp_lpg_chip *chip)
+{
+	int rc, period, list_size, start_idx, *duty_pct_list;
+	struct pwm_device *pwm_dev = &chip->pwm_dev;
+	struct qpnp_lpg_config	*lpg_config = &chip->lpg_config;
+	struct qpnp_lut_config	*lut_config = &lpg_config->lut_config;
+	struct lut_params	lut_params;
+
+	rc = of_property_read_u32(of_parent, "qcom,period", &period);
+	if (rc) {
+		pr_err("node is missing PWM Period prop");
+		return rc;
+	}
+
+	if (!of_get_property(of_lpg_node, "qcom,duty-percents", &list_size)) {
+		pr_err("node is missing duty-pct list");
+		return -EINVAL;
+	}
+
+	rc = of_property_read_u32(of_lpg_node, "cell-index", &start_idx);
+	if (rc) {
+		pr_err("Missing start index");
+		return rc;
+	}
+
+	list_size /= sizeof(u32);
+
+	if (list_size + start_idx > lpg_config->lut_size) {
+		pr_err("duty pct list size overflows\n");
 		return -EINVAL;
 	}
 
-	if (pwm->chip == NULL)
-		return -ENODEV;
+	duty_pct_list = kzalloc(sizeof(u32) * list_size, GFP_KERNEL);
 
-	lpg_config = &pwm->chip->lpg_config;
-	p_config = &pwm->pwm_config;
-
-	mutex_lock(&pwm->chip->lpg_mutex);
-
-	if (start) {
-		if (!lpg_config->lpg_configured) {
-			pr_err("Request received to enable LUT for\n");
-			pr_err("LPG channel %d\n", pwm->pwm_config.channel_id);
-			pr_err("But LPG is not configured, falling back to\n");
-			pr_err(" default LUT configuration if available\n");
-
-			if (lpg_config->bypass_lut) {
-				pr_err("No default LUT configuration found\n");
-				pr_err("Use pwm_lut_config() to configure\n");
-				rc = -EINVAL;
-				goto out;
-			}
-
-			qpnp_set_lut_params(&lut_params,
-					&lpg_config->lut_config);
-
-			rc = _pwm_lut_config(pwm, p_config->pwm_period,
-			(int *)lpg_config->lut_config.def_config.duty_pct_list,
-			lut_params);
-			if (rc) {
-				pr_err("Could not set the default LUT conf\n");
-				goto out;
-			}
-		}
-
-		rc = qpnp_lpg_enable_lut(pwm);
-	} else {
-		if (unlikely(!lpg_config->lpg_configured)) {
-			pr_err("LPG isn't configured\n");
-			rc = -EINVAL;
-			goto out;
-		}
-		rc = qpnp_lpg_disable_lut(pwm);
+	if (!duty_pct_list) {
+		pr_err("kzalloc failed on duty_pct_list\n");
+		return -ENOMEM;
 	}
 
+	rc = of_property_read_u32_array(of_lpg_node, "qcom,duty-percents",
+						duty_pct_list, list_size);
+	if (rc) {
+		pr_err("invalid or missing property:\n");
+		pr_err("qcom,duty-pcts-list\n");
+		kfree(duty_pct_list);
+		return rc;
+	}
+
+	/* Read optional properties */
+	qpnp_check_optional_dt_bindings(of_property_read_u32(of_lpg_node,
+		"qcom,ramp-step-duration", &lut_config->ramp_step_ms));
+	qpnp_check_optional_dt_bindings(of_property_read_u32(of_lpg_node,
+		"qcom,lpg-lut-pause-hi", &lut_config->lut_pause_hi_cnt));
+	qpnp_check_optional_dt_bindings(of_property_read_u32(of_lpg_node,
+		"qcom,lpg-lut-pause-lo", &lut_config->lut_pause_lo_cnt));
+	qpnp_check_optional_dt_bindings(of_property_read_u32(of_lpg_node,
+				"qcom,lpg-lut-ramp-direction",
+				(u32 *)&lut_config->ramp_direction));
+	qpnp_check_optional_dt_bindings(of_property_read_u32(of_lpg_node,
+				"qcom,lpg-lut-pattern-repeat",
+				(u32 *)&lut_config->pattern_repeat));
+	qpnp_check_optional_dt_bindings(of_property_read_u32(of_lpg_node,
+				"qcom,lpg-lut-ramp-toggle",
+				(u32 *)&lut_config->ramp_toggle));
+	qpnp_check_optional_dt_bindings(of_property_read_u32(of_lpg_node,
+				"qcom,lpg-lut-enable-pause-hi",
+				(u32 *)&lut_config->enable_pause_hi));
+	qpnp_check_optional_dt_bindings(of_property_read_u32(of_lpg_node,
+				"qcom,lpg-lut-enable-pause-lo",
+				(u32 *)&lut_config->enable_pause_lo));
+
+	qpnp_set_lut_params(&lut_params, lut_config, start_idx, list_size);
+
+	_pwm_lut_config(pwm_dev, period, duty_pct_list, lut_params);
+
 out:
-	mutex_unlock(&pwm->chip->lpg_mutex);
+	kfree(duty_pct_list);
 	return rc;
 }
-EXPORT_SYMBOL_GPL(pwm_lut_enable);
 
 /* Fill in lpg device elements based on values found in device tree. */
-static int qpnp_lpg_get_dt_config(struct spmi_device *spmi,
+static int qpnp_parse_dt_config(struct spmi_device *spmi,
 					struct qpnp_lpg_chip *chip)
 {
-	int			rc;
+	int			rc, enable;
+	const char		*lable;
 	struct resource		*res;
+	struct device_node	*node;
+	int found_pwm_subnode = 0;
+	int found_lpg_subnode = 0;
 	struct device_node	*of_node = spmi->dev.of_node;
-	struct qpnp_lpg_config	*lpg_config = &chip->lpg_config;
 	struct pwm_device	*pwm_dev = &chip->pwm_dev;
-	struct qpnp_lut_config	*lut_config = &chip->lpg_config.lut_config;
-	struct qpnp_lut_default_config	*def_lut_config =
-						&lut_config->def_config;
+	struct qpnp_lpg_config	*lpg_config = &chip->lpg_config;
+	struct qpnp_lut_config	*lut_config = &lpg_config->lut_config;
 
-	res = spmi_get_resource(spmi, 0, IORESOURCE_MEM, 0);
+	res = spmi_get_resource_byname(spmi, NULL, IORESOURCE_MEM,
+					QPNP_LPG_CHANNEL_BASE);
 	if (!res) {
 		dev_err(&spmi->dev, "%s: node is missing base address\n",
 			__func__);
@@ -1460,7 +1496,8 @@
 
 	lpg_config->base_addr = res->start;
 
-	res = spmi_get_resource(spmi, 0, IORESOURCE_MEM, 1);
+	res = spmi_get_resource_byname(spmi, NULL, IORESOURCE_MEM,
+						QPNP_LPG_LUT_BASE);
 	if (!res) {
 		dev_err(&spmi->dev, "%s: node is missing LUT base address\n",
 								__func__);
@@ -1471,88 +1508,68 @@
 	/* Each entry of LUT is of 2 bytes */
 	lpg_config->lut_size = resource_size(res) >> 1;
 
+	lut_config->duty_pct_list = kzalloc(lpg_config->lut_size *
+						sizeof(u16), GFP_KERNEL);
+	if (!lut_config->duty_pct_list) {
+		pr_err("can not allocate duty pct list\n");
+		return -ENOMEM;
+	}
 
 	rc = of_property_read_u32(of_node, "qcom,channel-id",
 				&pwm_dev->pwm_config.channel_id);
 	if (rc) {
-		dev_err(&spmi->dev, "%s: node is missing LPG channel id",
+		dev_err(&spmi->dev, "%s: node is missing LPG channel id\n",
 								__func__);
-		return rc;
+		goto out;
 	}
 
-	rc = of_property_read_u32(of_node, "qcom,period",
-				&pwm_dev->pwm_config.pwm_period);
-	if (rc) {
-		dev_err(&spmi->dev, "%s: node is missing PWM Period value",
+	for_each_child_of_node(of_node, node) {
+		rc = of_property_read_string(node, "label", &lable);
+		if (rc) {
+			dev_err(&spmi->dev, "%s: Missing lable property\n",
 								__func__);
-		return rc;
+			goto out;
+		}
+		if (!strncmp(lable, "pwm", 3)) {
+			rc = qpnp_parse_pwm_dt_config(node, of_node, chip);
+			if (rc)
+				goto out;
+			found_pwm_subnode = 1;
+		} else if (!strncmp(lable, "lpg", 3)) {
+			rc = qpnp_parse_lpg_dt_config(node, of_node, chip);
+			if (rc)
+				goto out;
+			found_lpg_subnode = 1;
+		} else {
+			dev_err(&spmi->dev, "%s: Invalid value for lable prop",
+								__func__);
+		}
 	}
 
-	if (!of_get_property(of_node, "qcom,duty-percents",
-						&def_lut_config->size)) {
-		lpg_config->bypass_lut = 1;
-	}
-
-	if (lpg_config->bypass_lut)
+	rc = of_property_read_u32(of_node, "qcom,mode-select", &enable);
+	if (rc)
 		goto read_opt_props;
 
-	rc = of_property_read_u32(of_node, "qcom,start-index",
-					&def_lut_config->start_idx);
-
-	if (rc) {
-		dev_err(&spmi->dev, "Missing start index");
-		return rc;
+	if ((enable == PM_PWM_MODE_PWM && found_pwm_subnode == 0) ||
+		(enable == PM_PWM_MODE_LPG && found_lpg_subnode == 0)) {
+		dev_err(&spmi->dev, "%s: Invalid mode select\n", __func__);
+		rc = -EINVAL;
+		goto out;
 	}
 
-	def_lut_config->size /= sizeof(u32);
-
-	def_lut_config->duty_pct_list = kzalloc(sizeof(u32) *
-					def_lut_config->size, GFP_KERNEL);
-	if (!def_lut_config->duty_pct_list) {
-		dev_err(&spmi->dev, "%s: kzalloc failed on duty_pct_list\n",
-								__func__);
-		return -ENOMEM;
-	}
-
-	rc = of_property_read_u32_array(of_node, "qcom,duty-percents",
-		def_lut_config->duty_pct_list, def_lut_config->size);
-	if (rc) {
-		dev_err(&spmi->dev, "invalid or missing property:\n");
-		dev_err(&spmi->dev, "qcom,duty-pcts-list\n");
-		kfree(def_lut_config->duty_pct_list);
-		return rc;
-	}
-
-	lut_config->duty_pct_list = kzalloc(lpg_config->lut_size * sizeof(u16),
-								GFP_KERNEL);
-	if (!lut_config->duty_pct_list) {
-		dev_err(&spmi->dev, "can not allocate duty pct list\n");
-		kfree(def_lut_config->duty_pct_list);
-		return -ENOMEM;
-	}
+	pwm_change_mode(pwm_dev, enable);
+	_pwm_enable(pwm_dev);
 
 read_opt_props:
 	/* Initialize optional config parameters from DT if provided */
-	of_property_read_u32(of_node, "qcom,duty",
-					&pwm_dev->pwm_config.pwm_duty);
-	of_property_read_u32(of_node, "qcom,ramp-step-duration",
-					&lut_config->ramp_step_ms);
-	of_property_read_u32(of_node, "qcom,lpg-lut-pause-hi",
-					&lut_config->lut_pause_hi_cnt);
-	of_property_read_u32(of_node, "qcom,lpg-lut-pause-lo",
-					&lut_config->lut_pause_lo_cnt);
-	of_property_read_u32(of_node, "qcom,lpg-lut-ramp-direction",
-					(u32 *)&lut_config->ramp_direction);
-	of_property_read_u32(of_node, "qcom,lpg-lut-pattern-repeat",
-					(u32 *)&lut_config->pattern_repeat);
-	of_property_read_u32(of_node, "qcom,lpg-lut-ramp-toggle",
-					(u32 *)&lut_config->ramp_toggle);
-	of_property_read_u32(of_node, "qcom,lpg-lut-enable-pause-hi",
-					(u32 *)&lut_config->enable_pause_hi);
-	of_property_read_u32(of_node, "qcom,lpg-lut-enable-pause-lo",
-					(u32 *)&lut_config->enable_pause_lo);
+	of_property_read_string(of_node, "qcom,channel-owner",
+				&pwm_dev->pwm_config.lable);
 
 	return 0;
+
+out:
+	kfree(lut_config->duty_pct_list);
+	return rc;
 }
 
 static int __devinit qpnp_pwm_probe(struct spmi_device *spmi)
@@ -1572,7 +1589,7 @@
 	chip->pwm_dev.chip = chip;
 	dev_set_drvdata(&spmi->dev, chip);
 
-	rc = qpnp_lpg_get_dt_config(spmi, chip);
+	rc = qpnp_parse_dt_config(spmi, chip);
 
 	if (rc)
 		goto failed_config;
@@ -1610,7 +1627,6 @@
 	if (chip) {
 		lpg_config = &chip->lpg_config;
 		kfree(lpg_config->lut_config.duty_pct_list);
-		kfree(lpg_config->lut_config.def_config.duty_pct_list);
 		mutex_destroy(&chip->lpg_mutex);
 		kfree(chip);
 	}
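The qpnp-pwm rework above moves per-mode settings into labelled child nodes ("pwm" vs "lpg") and dispatches on each child's "label" string during probe. A sketch of that dispatch shape with placeholder handler names; note the of_node_put() when bailing out early, since for_each_child_of_node() otherwise drops the child reference only on the next iteration:

#include <linux/errno.h>
#include <linux/of.h>
#include <linux/string.h>

static int example_parse_pwm_node(struct device_node *np) { return 0; }
static int example_parse_lpg_node(struct device_node *np) { return 0; }

static int example_parse_channels(struct device_node *parent)
{
	struct device_node *child;
	const char *label;
	int rc;

	for_each_child_of_node(parent, child) {
		rc = of_property_read_string(child, "label", &label);
		if (!rc) {
			if (!strncmp(label, "pwm", 3))
				rc = example_parse_pwm_node(child);
			else if (!strncmp(label, "lpg", 3))
				rc = example_parse_lpg_node(child);
			else
				rc = -EINVAL;	/* unrecognised label */
		}
		if (rc) {
			of_node_put(child);	/* drop the iterator's ref */
			return rc;
		}
	}
	return 0;
}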
diff --git a/drivers/platform/msm/usb_bam.c b/drivers/platform/msm/usb_bam.c
index d3edfa8..b7c73de 100644
--- a/drivers/platform/msm/usb_bam.c
+++ b/drivers/platform/msm/usb_bam.c
@@ -18,13 +18,14 @@
 #include <linux/io.h>
 #include <linux/stat.h>
 #include <linux/module.h>
+#include <linux/of.h>
 #include <linux/usb/msm_hsusb.h>
 #include <mach/usb_bam.h>
 #include <mach/sps.h>
 #include <linux/workqueue.h>
 
 #define USB_SUMMING_THRESHOLD 512
-#define CONNECTIONS_NUM		4
+#define CONNECTIONS_NUM	4
 
 static struct sps_bam_props usb_props;
 static struct sps_pipe *sps_pipes[CONNECTIONS_NUM][2];
@@ -43,32 +44,35 @@
 
 struct usb_bam_connect_info {
 	u8 idx;
-	u8 *src_pipe;
-	u8 *dst_pipe;
+	u32 *src_pipe;
+	u32 *dst_pipe;
 	struct usb_bam_wake_event_info peer_event;
 	bool enabled;
 };
 
 static struct usb_bam_connect_info usb_bam_connections[CONNECTIONS_NUM];
+static struct usb_bam_pipe_connect ***msm_usb_bam_connections_info;
+static struct usb_bam_pipe_connect *bam_connection_arr;
+
+static bool device_tree_enabled;
 
 static inline int bam_offset(struct msm_usb_bam_platform_data *pdata)
 {
 	return pdata->usb_active_bam * CONNECTIONS_NUM * 2;
 }
 
-static int connect_pipe(u8 connection_idx, enum usb_bam_pipe_dir pipe_dir,
-						u8 *usb_pipe_idx)
+static int connect_pipe(u8 conn_idx, enum usb_bam_pipe_dir pipe_dir,
+						u32 *usb_pipe_idx)
 {
 	int ret;
-	struct sps_pipe **pipe = &sps_pipes[connection_idx][pipe_dir];
+	struct sps_pipe **pipe = &sps_pipes[conn_idx][pipe_dir];
 	struct sps_connect *connection =
-		&sps_connections[connection_idx][pipe_dir];
+		&sps_connections[conn_idx][pipe_dir];
 	struct msm_usb_bam_platform_data *pdata =
-		(struct msm_usb_bam_platform_data *)
-			(usb_bam_pdev->dev.platform_data);
+		usb_bam_pdev->dev.platform_data;
 	struct usb_bam_pipe_connect *pipe_connection =
-			(struct usb_bam_pipe_connect *)(pdata->connections +
-			 bam_offset(pdata) + (2*connection_idx+pipe_dir));
+		(struct usb_bam_pipe_connect *)(pdata->connections +
+			 bam_offset(pdata) + (2*conn_idx+pipe_dir));
 
 	*pipe = sps_alloc_endpoint();
 	if (*pipe == NULL) {
@@ -105,26 +109,54 @@
 		*usb_pipe_idx = connection->dest_pipe_index;
 	}
 
-	ret = sps_setup_bam2bam_fifo(
-				&data_mem_buf[connection_idx][pipe_dir],
+	if (!device_tree_enabled) {
+		ret = sps_setup_bam2bam_fifo(
+				&data_mem_buf[conn_idx][pipe_dir],
 				pipe_connection->data_fifo_base_offset,
 				pipe_connection->data_fifo_size, 1);
-	if (ret) {
-		pr_err("%s: data fifo setup failure %d\n", __func__, ret);
-		goto fifo_setup_error;
-	}
-	connection->data = data_mem_buf[connection_idx][pipe_dir];
+		if (ret) {
+			pr_err("%s: data fifo setup failure %d\n", __func__,
+				ret);
+			goto fifo_setup_error;
+		}
 
-	ret = sps_setup_bam2bam_fifo(
-				&desc_mem_buf[connection_idx][pipe_dir],
+		ret = sps_setup_bam2bam_fifo(
+				&desc_mem_buf[conn_idx][pipe_dir],
 				pipe_connection->desc_fifo_base_offset,
 				pipe_connection->desc_fifo_size, 1);
-	if (ret) {
-		pr_err("%s: desc. fifo setup failure %d\n", __func__, ret);
-		goto fifo_setup_error;
+		if (ret) {
+			pr_err("%s: desc. fifo setup failure %d\n", __func__,
+				ret);
+			goto fifo_setup_error;
+		}
+	} else {
+		data_mem_buf[conn_idx][pipe_dir].phys_base =
+			pipe_connection->data_fifo_base_offset +
+				pdata->usb_base_address;
+		data_mem_buf[conn_idx][pipe_dir].size =
+			pipe_connection->data_fifo_size;
+		data_mem_buf[conn_idx][pipe_dir].base =
+			ioremap(data_mem_buf[conn_idx][pipe_dir].phys_base,
+				data_mem_buf[conn_idx][pipe_dir].size);
+		memset(data_mem_buf[conn_idx][pipe_dir].base, 0,
+			data_mem_buf[conn_idx][pipe_dir].size);
+
+		desc_mem_buf[conn_idx][pipe_dir].phys_base =
+			pipe_connection->desc_fifo_base_offset +
+				pdata->usb_base_address;
+		desc_mem_buf[conn_idx][pipe_dir].size =
+			pipe_connection->desc_fifo_size;
+		desc_mem_buf[conn_idx][pipe_dir].base =
+			ioremap(desc_mem_buf[conn_idx][pipe_dir].phys_base,
+				desc_mem_buf[conn_idx][pipe_dir].size);
+		memset(desc_mem_buf[conn_idx][pipe_dir].base, 0,
+			desc_mem_buf[conn_idx][pipe_dir].size);
 	}
-	connection->desc = desc_mem_buf[connection_idx][pipe_dir];
+
+	connection->data = data_mem_buf[conn_idx][pipe_dir];
+	connection->desc = desc_mem_buf[conn_idx][pipe_dir];
 	connection->event_thresh = 16;
+	connection->options = SPS_O_AUTO_ENABLE;
 
 	ret = sps_connect(*pipe, connection);
 	if (ret < 0) {
@@ -141,7 +173,22 @@
 	return ret;
 }
 
-int usb_bam_connect(u8 idx, u8 *src_pipe_idx, u8 *dst_pipe_idx)
+
+static int disconnect_pipe(u8 connection_idx, enum usb_bam_pipe_dir pipe_dir,
+						u32 *usb_pipe_idx)
+{
+	struct sps_pipe *pipe = sps_pipes[connection_idx][pipe_dir];
+	struct sps_connect *connection =
+		&sps_connections[connection_idx][pipe_dir];
+
+	sps_disconnect(pipe);
+	sps_free_endpoint(pipe);
+
+	connection->options &= ~SPS_O_AUTO_ENABLE;
+	return 0;
+}
+
+int usb_bam_connect(u8 idx, u32 *src_pipe_idx, u32 *dst_pipe_idx)
 {
 	struct usb_bam_connect_info *connection = &usb_bam_connections[idx];
 	int ret;
@@ -153,7 +200,7 @@
 	}
 
 	if (connection->enabled) {
-		pr_info("%s: connection %d was already established\n",
+		pr_debug("%s: connection %d was already established\n",
 			__func__, idx);
 		return 0;
 	}
@@ -161,19 +208,23 @@
 	connection->dst_pipe = dst_pipe_idx;
 	connection->idx = idx;
 
-	/* open USB -> Peripheral pipe */
-	ret = connect_pipe(connection->idx, USB_TO_PEER_PERIPHERAL,
-					   connection->src_pipe);
-	if (ret) {
-		pr_err("%s: src pipe connection failure\n", __func__);
-		return ret;
+	if (src_pipe_idx) {
+		/* open USB -> Peripheral pipe */
+		ret = connect_pipe(connection->idx, USB_TO_PEER_PERIPHERAL,
+			connection->src_pipe);
+		if (ret) {
+			pr_err("%s: src pipe connection failure\n", __func__);
+			return ret;
+		}
 	}
-	/* open Peripheral -> USB pipe */
-	ret = connect_pipe(connection->idx, PEER_PERIPHERAL_TO_USB,
-				 connection->dst_pipe);
-	if (ret) {
-		pr_err("%s: dst pipe connection failure\n", __func__);
-		return ret;
+	if (dst_pipe_idx) {
+		/* open Peripheral -> USB pipe */
+		ret = connect_pipe(connection->idx, PEER_PERIPHERAL_TO_USB,
+			connection->dst_pipe);
+		if (ret) {
+			pr_err("%s: dst pipe connection failure\n", __func__);
+			return ret;
+		}
 	}
 	connection->enabled = 1;
 
@@ -232,19 +283,259 @@
 	return 0;
 }
 
+int usb_bam_disconnect_pipe(u8 idx)
+{
+	struct usb_bam_connect_info *connection = &usb_bam_connections[idx];
+	int ret;
+
+	if (idx >= CONNECTIONS_NUM) {
+		pr_err("%s: Invalid connection index\n",
+			__func__);
+		return -EINVAL;
+	}
+
+	if (!connection->enabled) {
+		pr_debug("%s: connection %d isn't enabled\n",
+			__func__, idx);
+		return 0;
+	}
+
+	if (connection->src_pipe) {
+		/* close USB -> Peripheral pipe */
+		ret = disconnect_pipe(connection->idx, USB_TO_PEER_PERIPHERAL,
+						   connection->src_pipe);
+		if (ret) {
+			pr_err("%s: src pipe connection failure\n", __func__);
+			return ret;
+		}
+
+	}
+	if (connection->dst_pipe) {
+		/* close Peripheral -> USB pipe */
+		ret = disconnect_pipe(connection->idx, PEER_PERIPHERAL_TO_USB,
+			connection->dst_pipe);
+		if (ret) {
+			pr_err("%s: dst pipe connection failure\n", __func__);
+			return ret;
+		}
+	}
+
+	connection->src_pipe = 0;
+	connection->dst_pipe = 0;
+	connection->enabled = 0;
+
+	return 0;
+}
+
+static int update_connections_info(struct device_node *node, int bam,
+	int conn_num, int dir)
+{
+	u32 rc;
+	char *key = NULL;
+	uint32_t val = 0;
+
+	struct usb_bam_pipe_connect *pipe_connection;
+
+	pipe_connection = &msm_usb_bam_connections_info[bam][conn_num][dir];
+
+	key = "qcom,src-bam-physical-address";
+	rc = of_property_read_u32(node, key, &val);
+	if (rc)
+		goto err;
+	pipe_connection->src_phy_addr = val;
+
+	key = "qcom,src-bam-pipe-index";
+	rc = of_property_read_u32(node, key, &val);
+	if (rc)
+		goto err;
+	pipe_connection->src_pipe_index = val;
+
+	key = "qcom,dst-bam-physical-address";
+	rc = of_property_read_u32(node, key, &val);
+	if (rc)
+		goto err;
+	pipe_connection->dst_phy_addr = val;
+
+	key = "qcom,dst-bam-pipe-index";
+	rc = of_property_read_u32(node, key, &val);
+	if (rc)
+		goto err;
+	pipe_connection->dst_pipe_index = val;
+
+	key = "qcom,data-fifo-offset";
+	rc = of_property_read_u32(node, key, &val);
+	if (rc)
+		goto err;
+	pipe_connection->data_fifo_base_offset = val;
+
+	key = "qcom,data-fifo-size";
+	rc = of_property_read_u32(node, key, &val);
+	if (rc)
+		goto err;
+	pipe_connection->data_fifo_size = val;
+
+	key = "qcom,descriptor-fifo-offset";
+	rc = of_property_read_u32(node, key, &val);
+	if (rc)
+		goto err;
+	pipe_connection->desc_fifo_base_offset = val;
+
+	key = "qcom,descriptor-fifo-size";
+	rc = of_property_read_u32(node, key, &val);
+	if (rc)
+		goto err;
+	pipe_connection->desc_fifo_size = val;
+
+	return 0;
+
+err:
+	pr_err("%s: Error in name %s key %s\n", __func__,
+		node->full_name, key);
+	return -EFAULT;
+}
+
+static struct msm_usb_bam_platform_data *usb_bam_dt_to_pdata(
+	struct platform_device *pdev)
+{
+	struct msm_usb_bam_platform_data *pdata;
+	struct device_node *node = pdev->dev.of_node;
+	u32 i, j;
+	int conn_num, bam;
+	u8 dir;
+	u8 ncolumns = 2;
+	int bam_amount, rc = 0;
+	u32 pipe_entry = 0;
+	char *key = NULL;
+
+	pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
+	if (!pdata) {
+		pr_err("unable to allocate platform data\n");
+		return NULL;
+	}
+
+	rc = of_property_read_u32(node, "qcom,usb-active-bam",
+		&pdata->usb_active_bam);
+	if (rc) {
+		pr_err("Invalid usb active bam property\n");
+		return NULL;
+	}
+
+	rc = of_property_read_u32(node, "qcom,usb-total-bam-num",
+		&pdata->total_bam_num);
+	if (rc) {
+		pr_err("Invalid usb total bam num property\n");
+		return NULL;
+	}
+
+	rc = of_property_read_u32(node, "qcom,usb-bam-num-pipes",
+		&pdata->usb_bam_num_pipes);
+	if (rc) {
+		pr_err("Invalid usb bam num pipes property\n");
+		return NULL;
+	}
+
+	rc = of_property_read_u32(node, "qcom,usb-base-address",
+		&pdata->usb_base_address);
+	if (rc) {
+		pr_err("Invalid usb base address property\n");
+		return NULL;
+	}
+
+	for_each_child_of_node(pdev->dev.of_node, node)
+		pipe_entry++;
+
+	/*
+	 * we need to know the number of connections, so we know
+	 * how much memory to allocate
+	 */
+	conn_num = pipe_entry / 2;
+	bam_amount = pdata->total_bam_num;
+
+	if (conn_num > 0 && conn_num < pdata->usb_bam_num_pipes) {
+		/* alloc msm_usb_bam_connections_info */
+		bam_connection_arr = devm_kzalloc(&pdev->dev, bam_amount *
+			conn_num * ncolumns *
+			sizeof(struct usb_bam_pipe_connect), GFP_KERNEL);
+
+		if (!bam_connection_arr)
+			goto err;
+
+		msm_usb_bam_connections_info = devm_kzalloc(&pdev->dev,
+			bam_amount * sizeof(struct usb_bam_pipe_connect **),
+			GFP_KERNEL);
+
+		if (!msm_usb_bam_connections_info)
+			goto err;
+
+		for (j = 0; j < bam_amount; j++) {
+			msm_usb_bam_connections_info[j] =
+				devm_kzalloc(&pdev->dev, conn_num *
+				sizeof(struct usb_bam_pipe_connect *),
+				GFP_KERNEL);
+			for (i = 0; i < conn_num; i++)
+				msm_usb_bam_connections_info[j][i] =
+					bam_connection_arr +
+					(j * conn_num * ncolumns) +
+					(i * ncolumns);
+		}
+
+		/* retrieve device tree parameters */
+		for_each_child_of_node(pdev->dev.of_node, node) {
+			const char *str;
+
+			key = "qcom,usb-bam-type";
+			rc = of_property_read_u32(node, key, &bam);
+			if (rc)
+				goto err;
+
+			rc = of_property_read_string(node, "label", &str);
+			if (rc) {
+				pr_err("Cannot read label string\n");
+				goto err;
+			}
+
+			if (strstr(str, "usb-to-peri"))
+				dir = USB_TO_PEER_PERIPHERAL;
+			else if (strstr(str, "peri-to-usb"))
+				dir = PEER_PERIPHERAL_TO_USB;
+			else
+				goto err;
+
+			if (!strcmp(str, "usb-to-peri-qdss-dwc3") ||
+				!strcmp(str, "peri-to-usb-qdss-dwc3"))
+					conn_num = 0;
+			else
+				goto err;
+
+			rc = update_connections_info(node, bam, conn_num, dir);
+			if (rc)
+				goto err;
+		}
+
+		pdata->connections = &msm_usb_bam_connections_info[0][0][0];
+
+	} else {
+		goto err;
+	}
+
+	return pdata;
+err:
+	pr_err("%s: failed\n", __func__);
+	return NULL;
+}
+
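usb_bam_dt_to_pdata() lays the per-pipe descriptors out as one contiguous allocation (bam x connection x direction) and then builds per-bam row pointers into it. The sketch below is a standalone user-space program with illustrative sizes and a stand-in struct (not the driver's types); it only demonstrates the same index arithmetic:

	/*
	 * Minimal standalone sketch (not driver code) of the flattened
	 * [bam][connection][direction] layout built by usb_bam_dt_to_pdata():
	 * one contiguous allocation plus per-bam row pointers into it.
	 */
	#include <stdio.h>
	#include <stdlib.h>

	struct pipe_connect { int src_pipe_index; };	/* stand-in for usb_bam_pipe_connect */

	int main(void)
	{
		int bam_amount = 2, conn_num = 4, ncolumns = 2;	/* 2 directions */
		struct pipe_connect *flat;
		struct pipe_connect **rows[2];
		int j, i;

		flat = calloc(bam_amount * conn_num * ncolumns, sizeof(*flat));
		for (j = 0; j < bam_amount; j++) {
			rows[j] = calloc(conn_num, sizeof(struct pipe_connect *));
			for (i = 0; i < conn_num; i++)
				rows[j][i] = flat + (j * conn_num * ncolumns) +
						(i * ncolumns);
		}

		/* rows[j][i][dir] with dir in {0,1} addresses one pipe descriptor */
		rows[1][3][1].src_pipe_index = 7;
		printf("%d\n",
			flat[(1 * conn_num * ncolumns) + (3 * ncolumns) + 1].src_pipe_index);

		free(rows[0]);
		free(rows[1]);
		free(flat);
		return 0;
	}

The flat layout is what lets pdata->connections point at element [0][0][0] while msm_usb_bam_connections_info[bam][conn][dir] style addressing still works.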
 static int usb_bam_init(void)
 {
 	u32 h_usb;
 	int ret;
 	void *usb_virt_addr;
 	struct msm_usb_bam_platform_data *pdata =
-		(struct msm_usb_bam_platform_data *)
-			(usb_bam_pdev->dev.platform_data);
+		usb_bam_pdev->dev.platform_data;
 	struct resource *res;
 	int irq;
 
 	res = platform_get_resource(usb_bam_pdev, IORESOURCE_MEM,
-						pdata->usb_active_bam);
+		pdata->usb_active_bam);
 	if (!res) {
 		dev_err(&usb_bam_pdev->dev, "Unable to get memory resource\n");
 		return -ENODEV;
@@ -266,6 +557,7 @@
 	usb_props.virt_size = resource_size(res);
 	usb_props.irq = irq;
 	usb_props.summing_threshold = USB_SUMMING_THRESHOLD;
+	usb_props.event_threshold = 512;
 	usb_props.num_pipes = pdata->usb_bam_num_pipes;
 
 	ret = sps_register_bam_device(&usb_props, &h_usb);
@@ -286,11 +578,10 @@
 usb_bam_show_enable(struct device *dev, struct device_attribute *attr,
 		    char *buf)
 {
-	struct platform_device *pdev = container_of(dev, struct platform_device,
-						    dev);
+	struct platform_device *pdev =
+		container_of(dev, struct platform_device, dev);
 	struct msm_usb_bam_platform_data *pdata =
-		(struct msm_usb_bam_platform_data *)
-			(usb_bam_pdev->dev.platform_data);
+		usb_bam_pdev->dev.platform_data;
 
 	if (!pdev || !pdata)
 		return 0;
@@ -302,11 +593,10 @@
 				     struct device_attribute *attr,
 				     const char *buf, size_t count)
 {
-	struct platform_device *pdev = container_of(dev, struct platform_device,
-						    dev);
+	struct platform_device *pdev = container_of(dev,
+		struct platform_device, dev);
 	struct msm_usb_bam_platform_data *pdata =
-		(struct msm_usb_bam_platform_data *)
-			(usb_bam_pdev->dev.platform_data);
+		usb_bam_pdev->dev.platform_data;
 	char str[10], *pstr;
 	int ret, i;
 
@@ -336,6 +626,7 @@
 static int usb_bam_probe(struct platform_device *pdev)
 {
 	int ret, i;
+	struct msm_usb_bam_platform_data *pdata;
 
 	dev_dbg(&pdev->dev, "usb_bam_probe\n");
 
@@ -345,9 +636,19 @@
 			usb_bam_wake_work);
 	}
 
-	if (!pdev->dev.platform_data) {
+	if (pdev->dev.of_node) {
+		dev_dbg(&pdev->dev, "device tree enabled\n");
+		device_tree_enabled = 1;
+		pdata = usb_bam_dt_to_pdata(pdev);
+		if (!pdata)
+			return -ENOMEM;
+		pdev->dev.platform_data = pdata;
+	} else if (!pdev->dev.platform_data) {
 		dev_err(&pdev->dev, "missing platform_data\n");
 		return -ENODEV;
+	} else {
+		pdata = pdev->dev.platform_data;
+		device_tree_enabled = 0;
 	}
 	usb_bam_pdev = pdev;
 
@@ -365,6 +666,32 @@
 	return ret;
 }
 
+void get_bam2bam_connection_info(u8 conn_idx, enum usb_bam_pipe_dir pipe_dir,
+	u32 *usb_bam_handle, u32 *usb_bam_pipe_idx, u32 *peer_pipe_idx,
+	struct sps_mem_buffer *desc_fifo, struct sps_mem_buffer *data_fifo)
+{
+	struct sps_connect *connection =
+		&sps_connections[conn_idx][pipe_dir];
+
+
+	if (pipe_dir == USB_TO_PEER_PERIPHERAL) {
+		*usb_bam_handle = connection->source;
+		*usb_bam_pipe_idx = connection->src_pipe_index;
+		*peer_pipe_idx = connection->dest_pipe_index;
+	} else {
+		*usb_bam_handle = connection->destination;
+		*usb_bam_pipe_idx = connection->dest_pipe_index;
+		*peer_pipe_idx = connection->src_pipe_index;
+	}
+	if (data_fifo)
+		memcpy(data_fifo, &data_mem_buf[conn_idx][pipe_dir],
+			sizeof(struct sps_mem_buffer));
+	if (desc_fifo)
+		memcpy(desc_fifo, &desc_mem_buf[conn_idx][pipe_dir],
+			sizeof(struct sps_mem_buffer));
+}
+EXPORT_SYMBOL(get_bam2bam_connection_info);
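get_bam2bam_connection_info() gives a peer BAM driver the USB-side handle, pipe indexes and FIFO buffers for an already-configured connection. Below is a minimal caller sketch; only the function signature and the USB_TO_PEER_PERIPHERAL direction come from the code above, while the header paths and the use of connection index 0 are assumptions:

	/* Hypothetical caller sketch; conn index 0 is illustrative. */
	#include <linux/kernel.h>
	#include <linux/types.h>
	#include <mach/sps.h>		/* assumed location of struct sps_mem_buffer */
	#include <mach/usb_bam.h>	/* assumed location of the prototypes */

	static int example_query_usb_bam(void)
	{
		u32 bam_handle, usb_pipe, peer_pipe;
		struct sps_mem_buffer desc_fifo, data_fifo;

		get_bam2bam_connection_info(0, USB_TO_PEER_PERIPHERAL,
				&bam_handle, &usb_pipe, &peer_pipe,
				&desc_fifo, &data_fifo);

		pr_debug("usb bam handle 0x%x, usb pipe %u, peer pipe %u\n",
				bam_handle, usb_pipe, peer_pipe);
		return 0;
	}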
+
 static int usb_bam_remove(struct platform_device *pdev)
 {
 	destroy_workqueue(usb_bam_wq);
@@ -372,10 +699,20 @@
 	return 0;
 }
 
+static const struct of_device_id usb_bam_dt_match[] = {
+	{ .compatible = "qcom,usb-bam-msm",
+	},
+	{}
+};
+MODULE_DEVICE_TABLE(of, usb_bam_dt_match);
+
 static struct platform_driver usb_bam_driver = {
 	.probe = usb_bam_probe,
 	.remove = usb_bam_remove,
-	.driver = { .name = "usb_bam", },
+	.driver		= {
+		.name	= "usb_bam",
+		.of_match_table = usb_bam_dt_match,
+	},
 };
 
 static int __init init(void)
diff --git a/drivers/power/pm8921-charger.c b/drivers/power/pm8921-charger.c
index f84e3ac..85b653d 100644
--- a/drivers/power/pm8921-charger.c
+++ b/drivers/power/pm8921-charger.c
@@ -248,6 +248,7 @@
 	bool				ext_charging;
 	bool				ext_charge_done;
 	bool				iusb_fine_res;
+	bool				dc_unplug_check;
 	DECLARE_BITMAP(enabled_irqs, PM_CHG_MAX_INTS);
 	struct work_struct		battery_id_valid_work;
 	int64_t				batt_id_min;
@@ -2447,6 +2448,10 @@
 		}
 	} else if (active_path & DC_ACTIVE_BIT) {
 		pr_debug("DC charger active\n");
+		/* Some board designs are not prone to reverse boost on the
+		 * DC charging path */
+		if (!chip->dc_unplug_check)
+			return;
 	} else {
 		/* No charger active */
 		if (!(is_usb_chg_plugged_in(chip)
@@ -4000,6 +4005,7 @@
 		chip->warm_temp_dc = INT_MIN;
 
 	chip->temp_check_period = pdata->temp_check_period;
+	chip->dc_unplug_check = pdata->dc_unplug_check;
 	chip->max_bat_chg_current = pdata->max_bat_chg_current;
 	chip->cool_bat_chg_current = pdata->cool_bat_chg_current;
 	chip->warm_bat_chg_current = pdata->warm_bat_chg_current;
diff --git a/drivers/power/power_supply_core.c b/drivers/power/power_supply_core.c
index e87b4bd..352e60e 100644
--- a/drivers/power/power_supply_core.c
+++ b/drivers/power/power_supply_core.c
@@ -76,12 +76,29 @@
 	if (psy->set_property)
 		return psy->set_property(psy, POWER_SUPPLY_PROP_SCOPE,
 								&ret);
-
 	return -ENXIO;
 }
 EXPORT_SYMBOL_GPL(power_supply_set_scope);
 
 /**
+ * power_supply_set_supply_type - set type of the power supply
+ * @psy:	the power supply to control
+ * @supply_type:	sets type property of power supply
+ */
+int power_supply_set_supply_type(struct power_supply *psy,
+				enum power_supply_type supply_type)
+{
+	const union power_supply_propval ret = {supply_type,};
+
+	if (psy->set_property)
+		return psy->set_property(psy, POWER_SUPPLY_PROP_TYPE,
+								&ret);
+
+	return -ENXIO;
+}
+EXPORT_SYMBOL_GPL(power_supply_set_supply_type);
+
+/**
  * power_supply_set_charge_type - set charge type of the power supply
  * @psy:	the power supply to control
  * @enable:	sets charge type property of power supply
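power_supply_set_supply_type() follows the same pattern as the existing setters: it forwards a propval to the supply's set_property hook and returns -ENXIO when the hook is absent. A minimal caller sketch, assuming a registered supply named "usb" (the supply name is illustrative):

	/* Minimal caller sketch; the "usb" supply name is an assumption. */
	#include <linux/errno.h>
	#include <linux/power_supply.h>

	static int example_mark_usb_supply(void)
	{
		struct power_supply *psy = power_supply_get_by_name("usb");

		if (!psy)
			return -ENODEV;

		/* Returns -ENXIO if the supply has no set_property hook */
		return power_supply_set_supply_type(psy, POWER_SUPPLY_TYPE_USB);
	}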
diff --git a/drivers/slimbus/slim-msm-ctrl.c b/drivers/slimbus/slim-msm-ctrl.c
index fa9d1df..6eb5d60 100644
--- a/drivers/slimbus/slim-msm-ctrl.c
+++ b/drivers/slimbus/slim-msm-ctrl.c
@@ -1746,9 +1746,6 @@
 		},
 	};
 
-	if (!dev->use_rx_msgqs)
-		goto init_rx_msgq;
-
 	bam_props.ee = dev->ee;
 	bam_props.virt_addr = dev->bam.base;
 	bam_props.phys_addr = bam_mem->start;
@@ -1784,7 +1781,7 @@
 	ret = msm_slim_init_rx_msgq(dev);
 	if (ret)
 		dev_err(dev->dev, "msm_slim_init_rx_msgq failed 0x%x\n", ret);
-	if (!dev->use_rx_msgqs && bam_handle) {
+	if (ret && bam_handle) {
 		sps_deregister_bam_device(bam_handle);
 		dev->bam.hdl = 0L;
 	}
@@ -1850,6 +1847,7 @@
 	struct resource		*bam_mem, *bam_io;
 	struct resource		*slim_mem, *slim_io;
 	struct resource		*irq, *bam_irq;
+	bool			rxreg_access = false;
 	slim_mem = platform_get_resource_byname(pdev, IORESOURCE_MEM,
 						"slimbus_physical");
 	if (!slim_mem) {
@@ -1922,13 +1920,15 @@
 			dev_err(&pdev->dev, "Cell index not specified:%d", ret);
 			goto err_of_init_failed;
 		}
+		rxreg_access = of_property_read_bool(pdev->dev.of_node,
+					"qcom,rxreg-access");
 		/* Optional properties */
 		ret = of_property_read_u32(pdev->dev.of_node,
 					"qcom,min-clk-gear", &dev->ctrl.min_cg);
 		ret = of_property_read_u32(pdev->dev.of_node,
 					"qcom,max-clk-gear", &dev->ctrl.max_cg);
-		pr_err("min_cg:%d, max_cg:%d, ret:%d", dev->ctrl.min_cg,
-					dev->ctrl.max_cg, ret);
+		pr_debug("min_cg:%d, max_cg:%d, rxreg: %d\n", dev->ctrl.min_cg,
+					dev->ctrl.max_cg, rxreg_access);
 	} else {
 		dev->ctrl.nr = pdev->id;
 	}
@@ -1947,7 +1947,11 @@
 	mutex_init(&dev->tx_lock);
 	spin_lock_init(&dev->rx_lock);
 	dev->ee = 1;
-	dev->use_rx_msgqs = 1;
+	if (rxreg_access)
+		dev->use_rx_msgqs = 0;
+	else
+		dev->use_rx_msgqs = 1;
+
 	dev->irq = irq->start;
 	dev->bam.irq = bam_irq->start;
 
diff --git a/drivers/slimbus/slimbus.c b/drivers/slimbus/slimbus.c
index 8cef99e..2198954 100644
--- a/drivers/slimbus/slimbus.c
+++ b/drivers/slimbus/slimbus.c
@@ -2516,6 +2516,18 @@
 	u32 segdist;
 	struct slim_pending_ch *pch;
 
+	/*
+	 * If there are no pending changes from this client, avoid sending
+	 * the reconfiguration sequence
+	 */
+	if (sb->pending_msgsl == sb->cur_msgsl &&
+		list_empty(&sb->mark_define) &&
+		list_empty(&sb->mark_removal) &&
+		list_empty(&sb->mark_suspend)) {
+		pr_debug("SLIM_CL: skip reconfig sequence\n");
+		return 0;
+	}
+
 	mutex_lock(&ctrl->sched.m_reconf);
 	mutex_lock(&ctrl->m_ctrl);
 	ctrl->sched.pending_msgsl += sb->pending_msgsl - sb->cur_msgsl;
@@ -2538,7 +2550,6 @@
 		struct slim_ich *slc = &ctrl->chans[pch->chan];
 		slc->state = SLIM_CH_SUSPENDED;
 	}
-	mutex_unlock(&ctrl->m_ctrl);
 
 	ret = slim_allocbw(sb, &subframe, &clkgear);
 
@@ -2688,7 +2699,6 @@
 			NULL, 0, 3, NULL, 0, NULL);
 	dev_dbg(&ctrl->dev, "reconfig now:ret:%d\n", ret);
 	if (!ret) {
-		mutex_lock(&ctrl->m_ctrl);
 		ctrl->sched.subfrmcode = subframe;
 		ctrl->clkgear = clkgear;
 		ctrl->sched.msgsl = ctrl->sched.pending_msgsl;
@@ -2700,7 +2710,6 @@
 	}
 
 revert_reconfig:
-	mutex_lock(&ctrl->m_ctrl);
 	/* Revert channel changes */
 	slim_chan_changes(sb, true);
 	mutex_unlock(&ctrl->m_ctrl);
@@ -2949,6 +2958,7 @@
 	mutex_unlock(&ctrl->sched.m_reconf);
 	return ret;
 }
+EXPORT_SYMBOL_GPL(slim_ctrl_clk_pause);
 
 MODULE_LICENSE("GPL v2");
 MODULE_VERSION("0.1");
diff --git a/drivers/spmi/spmi-pmic-arb.c b/drivers/spmi/spmi-pmic-arb.c
index 422e99e..ae1eff8 100644
--- a/drivers/spmi/spmi-pmic-arb.c
+++ b/drivers/spmi/spmi-pmic-arb.c
@@ -79,6 +79,7 @@
 #define PMIC_ARB_MAX_PERIPHS		256
 #define PMIC_ARB_PERIPH_ID_VALID	(1 << 15)
 #define PMIC_ARB_TIMEOUT_US		100
+#define PMIC_ARB_MAX_TRANS_BYTES	(8)
 
 #define PMIC_ARB_APID_MASK				0xFF
 #define PMIC_ARB_PPID_MASK				0xFFF
@@ -160,49 +161,29 @@
 	return -ETIMEDOUT;
 }
 
+/**
+ * pa_read_data: reads a pmic-arb register and copies 1..4 bytes to buf
+ * @bc:  byte count minus 1, valid range 0..3
+ * @reg: register address
+ * @buf: output buffer, must be at least bc + 1 bytes long
+ */
 static void pa_read_data(struct spmi_pmic_arb_dev *dev, u8 *buf, u32 reg, u8 bc)
 {
 	u32 data = pmic_arb_read(dev, reg);
-
-	switch (bc & 0x3) {
-	case 3:
-		*buf++ = data & 0xff;
-		data >>= 8;
-	case 2:
-		*buf++ = data & 0xff;
-		data >>= 8;
-	case 1:
-		*buf++ = data & 0xff;
-		data >>= 8;
-	case 0:
-		*buf++ = data & 0xff;
-	default:
-		break;
-	}
+	memcpy(buf, &data, (bc & 3) + 1);
 }
 
+/**
+ * pa_write_data: writes 1..4 bytes from buf to a pmic-arb register
+ * @bc:  byte count minus 1, valid range 0..3
+ * @reg: register address
+ * @buf: input buffer, must be at least bc + 1 bytes long
+ */
 static void
 pa_write_data(struct spmi_pmic_arb_dev *dev, u8 *buf, u32 reg, u8 bc)
 {
 	u32 data = 0;
-
-	switch (bc & 0x3) {
-	case 3:
-		data = (buf[0]|buf[1]<<8|buf[2]<<16|buf[3]<<24);
-		break;
-	case 2:
-		data = (buf[0]|buf[1]<<8|buf[2]<<16);
-		break;
-	case 1:
-		data = (buf[0]|buf[1]<<8);
-		break;
-	case 0:
-		data = (buf[0]);
-		break;
-	default:
-		break;
-	}
-
+	memcpy(&data, buf, (bc & 3) + 1);
 	pmic_arb_write(dev, reg, data);
 }
 
@@ -238,6 +219,12 @@
 	u32 cmd;
 	int rc;
 
+	if (bc >= PMIC_ARB_MAX_TRANS_BYTES) {
+		dev_err(pmic_arb->dev,
+			"pmic-arb supports 1..%d bytes per transaction, but %d requested\n",
+			PMIC_ARB_MAX_TRANS_BYTES, bc + 1);
+		return -EINVAL;
+	}
 	pr_debug("op:0x%x sid:%d bc:%d addr:0x%x\n", opc, sid, bc, addr);
 
 	/* Check the opcode */
@@ -259,11 +246,12 @@
 		goto done;
 
 	/* Read from FIFO, note 'bc' is actually number of bytes minus 1 */
-	pa_read_data(pmic_arb, buf, PMIC_ARB_RDATA0(pmic_arb->channel), bc);
+	pa_read_data(pmic_arb, buf, PMIC_ARB_RDATA0(pmic_arb->channel)
+							, min_t(u8, bc, 3));
 
 	if (bc > 3)
 		pa_read_data(pmic_arb, buf + 4,
-				PMIC_ARB_RDATA1(pmic_arb->channel), bc);
+				PMIC_ARB_RDATA1(pmic_arb->channel), bc - 4);
 
 done:
 	spin_unlock_irqrestore(&pmic_arb->lock, flags);
@@ -278,6 +266,12 @@
 	u32 cmd;
 	int rc;
 
+	if (bc >= PMIC_ARB_MAX_TRANS_BYTES) {
+		dev_err(pmic_arb->dev,
+			"pmic-arb supports 1..%d bytes per transaction, but %d requested\n",
+			PMIC_ARB_MAX_TRANS_BYTES, bc + 1);
+		return -EINVAL;
+	}
 	pr_debug("op:0x%x sid:%d bc:%d addr:0x%x\n", opc, sid, bc, addr);
 
 	/* Check the opcode */
@@ -296,11 +290,11 @@
 
 	/* Write data to FIFOs */
 	spin_lock_irqsave(&pmic_arb->lock, flags);
-	pa_write_data(pmic_arb, buf, PMIC_ARB_WDATA0(pmic_arb->channel), bc);
-
+	pa_write_data(pmic_arb, buf, PMIC_ARB_WDATA0(pmic_arb->channel)
+							, min_t(u8, bc, 3));
 	if (bc > 3)
 		pa_write_data(pmic_arb, buf + 4,
-				PMIC_ARB_WDATA1(pmic_arb->channel), bc);
+				PMIC_ARB_WDATA1(pmic_arb->channel), bc - 4);
 
 	/* Start the transaction */
 	pmic_arb_write(pmic_arb, PMIC_ARB_CMD(pmic_arb->channel), cmd);
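pa_read_data()/pa_write_data() now rely on memcpy() to move the low (bc & 3) + 1 bytes of the 32-bit register word, which matches the LSB-first order of the unrolled switch statements they replace on a little-endian CPU (the case for the ARM SoCs this arbiter sits on). A standalone user-space sketch of that equivalence, using made-up values:

	/*
	 * Standalone sketch (little-endian assumption) showing that
	 * memcpy(buf, &data, (bc & 3) + 1) unpacks the same LSB-first bytes
	 * as the switch statement it replaces.
	 */
	#include <stdio.h>
	#include <stdint.h>
	#include <string.h>

	int main(void)
	{
		uint32_t data = 0xAABBCCDD;	/* pretend register read */
		uint8_t bc = 2;			/* byte count minus 1 -> 3 bytes */
		uint8_t buf[4] = {0};

		memcpy(buf, &data, (bc & 3) + 1);
		printf("%02x %02x %02x %02x\n", buf[0], buf[1], buf[2], buf[3]);
		/* prints "dd cc bb 00" on a little-endian CPU */
		return 0;
	}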
diff --git a/drivers/thermal/msm8974-tsens.c b/drivers/thermal/msm8974-tsens.c
index 6628b79..0628d2e 100644
--- a/drivers/thermal/msm8974-tsens.c
+++ b/drivers/thermal/msm8974-tsens.c
@@ -98,10 +98,10 @@
 #define TSENS_THRESHOLD_MIN_CODE	0x0
 
 #define TSENS_CTRL_INIT_DATA1		0x3fffff9
-#define TSENS_GLOBAL_INIT_DATA		0x20013
-#define TSENS_S0_MAIN_CFG_INIT_DATA	0x1ba
+#define TSENS_GLOBAL_INIT_DATA		0x302f16c
+#define TSENS_S0_MAIN_CFG_INIT_DATA	0x1c3
 #define TSENS_SN_MIN_MAX_STATUS_CTRL_DATA	0x3ffc00
-#define TSENS_SN_REMOTE_CFG_DATA	0xdba
+#define TSENS_SN_REMOTE_CFG_DATA	0x11c3
 
 /* Trips: warm and cool */
 enum tsens_trip_type {
@@ -212,7 +212,7 @@
 
 	return 0;
 }
-EXPORT_SYMBOL(msm_tsens_get_temp);
+EXPORT_SYMBOL(tsens_get_temp);
 
 static int tsens_tz_get_mode(struct thermal_zone_device *thermal,
 			      enum thermal_device_mode *mode)
@@ -474,13 +474,15 @@
 
 static int tsens_calib_sensors(void)
 {
-	int i, tsens_base1_data, tsens0_point1, tsens1_point1;
-	int tsens2_point1, tsens3_point1, tsens4_point1, tsens5_point1;
-	int tsens6_point1, tsens7_point1, tsens8_point1, tsens9_point1;
-	int tsens10_point1, tsens0_point2, tsens1_point2, tsens2_point2;
-	int tsens3_point2, tsens4_point2, tsens5_point2, tsens6_point2;
-	int tsens7_point2, tsens8_point2, tsens9_point2, tsens10_point2;
-	int tsens_base2_data, tsens_calibration_mode, temp;
+	int i, tsens_base1_data = 0, tsens0_point1 = 0, tsens1_point1 = 0;
+	int tsens2_point1 = 0, tsens3_point1 = 0, tsens4_point1 = 0;
+	int tsens5_point1 = 0, tsens6_point1 = 0, tsens7_point1 = 0;
+	int tsens8_point1 = 0, tsens9_point1 = 0, tsens10_point1 = 0;
+	int tsens0_point2 = 0, tsens1_point2 = 0, tsens2_point2 = 0;
+	int tsens3_point2 = 0, tsens4_point2 = 0, tsens5_point2 = 0;
+	int tsens6_point2 = 0, tsens7_point2 = 0, tsens8_point2 = 0;
+	int tsens9_point2 = 0, tsens10_point2 = 0;
+	int tsens_base2_data = 0, tsens_calibration_mode = 0, temp;
 	uint32_t calib_data[5];
 
 	for (i = 0; i < 5; i++)
@@ -492,12 +494,14 @@
 	temp = (calib_data[3] & TSENS_CAL_SEL_2
 			>> TSENS_CAL_SEL_SHIFT_2);
 	tsens_calibration_mode |= temp;
-	/* Remove this after bringup */
-	tsens_calibration_mode = TSENS_ONE_POINT_CALIB;
 
 	if (!tsens_calibration_mode) {
-		pr_err("TSENS not calibrated\n");
-		return -ENODEV;
+		pr_debug("TSENS is in calibrationless mode\n");
+		for (i = 0; i < tmdev->tsens_num_sensor; i++) {
+			tmdev->sensor[i].calib_data_point2 = 78000;
+			tmdev->sensor[i].calib_data_point1 = 49200;
+		}
+		goto compute_intercept_slope;
 	} else if (tsens_calibration_mode == TSENS_ONE_POINT_CALIB ||
 				TSENS_TWO_POINT_CALIB) {
 		tsens_base1_data = calib_data[0] & TSENS_BASE1_MASK;
@@ -525,63 +529,72 @@
 		tsens8_point2 = calib_data[4] & TSENS8_POINT2_MASK;
 		tsens9_point2 = calib_data[4] & TSENS9_POINT2_MASK;
 		tsens10_point2 = calib_data[4] & TSENS10_POINT2_MASK;
-	} else
+	} else {
 		pr_debug("Calibration mode is unknown: %d\n",
 						tsens_calibration_mode);
+		return -ENODEV;
+	}
 
-	tmdev->sensor[0].calib_data_point1 =
+	if (tsens_calibration_mode == TSENS_ONE_POINT_CALIB) {
+		tmdev->sensor[0].calib_data_point1 =
 		(((tsens_base1_data + tsens0_point1) << 2) | TSENS_BIT_APPEND);
-	tmdev->sensor[0].calib_data_point2 =
-		(((tsens_base2_data + tsens0_point2) << 2) | TSENS_BIT_APPEND);
-	tmdev->sensor[1].calib_data_point1 =
+		tmdev->sensor[1].calib_data_point1 =
 		(((tsens_base1_data + tsens1_point1) << 2) | TSENS_BIT_APPEND);
-	tmdev->sensor[1].calib_data_point2 =
-		(((tsens_base2_data + tsens1_point2) << 2) | TSENS_BIT_APPEND);
-	tmdev->sensor[2].calib_data_point1 =
+		tmdev->sensor[2].calib_data_point1 =
 		(((tsens_base1_data + tsens2_point1) << 2) | TSENS_BIT_APPEND);
-	tmdev->sensor[2].calib_data_point2 =
-		(((tsens_base2_data + tsens2_point2) << 2) | TSENS_BIT_APPEND);
-	tmdev->sensor[3].calib_data_point1 =
+		tmdev->sensor[3].calib_data_point1 =
 		(((tsens_base1_data + tsens3_point1) << 2) | TSENS_BIT_APPEND);
-	tmdev->sensor[3].calib_data_point2 =
-		(((tsens_base2_data + tsens3_point2) << 2) | TSENS_BIT_APPEND);
-	tmdev->sensor[4].calib_data_point1 =
+		tmdev->sensor[4].calib_data_point1 =
 		(((tsens_base1_data + tsens4_point1) << 2) | TSENS_BIT_APPEND);
-	tmdev->sensor[4].calib_data_point2 =
-		(((tsens_base2_data + tsens4_point2) << 2) | TSENS_BIT_APPEND);
-	tmdev->sensor[5].calib_data_point1 =
+		tmdev->sensor[5].calib_data_point1 =
 		(((tsens_base1_data + tsens5_point1) << 2) | TSENS_BIT_APPEND);
-	tmdev->sensor[5].calib_data_point2 =
-		(((tsens_base2_data + tsens5_point2) << 2) | TSENS_BIT_APPEND);
-	tmdev->sensor[6].calib_data_point1 =
+		tmdev->sensor[6].calib_data_point1 =
 		(((tsens_base1_data + tsens6_point1) << 2) | TSENS_BIT_APPEND);
-	tmdev->sensor[6].calib_data_point2 =
-		(((tsens_base2_data + tsens6_point2) << 2) | TSENS_BIT_APPEND);
-	tmdev->sensor[7].calib_data_point1 =
+		tmdev->sensor[7].calib_data_point1 =
 		(((tsens_base1_data + tsens7_point1) << 2) | TSENS_BIT_APPEND);
-	tmdev->sensor[7].calib_data_point2 =
-		(((tsens_base2_data + tsens7_point2) << 2) | TSENS_BIT_APPEND);
-	tmdev->sensor[8].calib_data_point1 =
+		tmdev->sensor[8].calib_data_point1 =
 		(((tsens_base1_data + tsens8_point1) << 2) | TSENS_BIT_APPEND);
-	tmdev->sensor[8].calib_data_point2 =
-		(((tsens_base2_data + tsens8_point2) << 2) | TSENS_BIT_APPEND);
-	tmdev->sensor[9].calib_data_point1 =
+		tmdev->sensor[9].calib_data_point1 =
 		(((tsens_base1_data + tsens9_point1) << 2) | TSENS_BIT_APPEND);
-	tmdev->sensor[9].calib_data_point2 =
-		(((tsens_base2_data + tsens9_point2) < 2) | TSENS_BIT_APPEND);
-	tmdev->sensor[10].calib_data_point1 =
+		tmdev->sensor[10].calib_data_point1 =
 		(((tsens_base1_data + tsens10_point1) << 2) | TSENS_BIT_APPEND);
-	tmdev->sensor[10].calib_data_point2 =
-		(((tsens_base2_data + tsens10_point2) << 2) | TSENS_BIT_APPEND);
+	}
 
+	if (tsens_calibration_mode == TSENS_TWO_POINT_CALIB) {
+		tmdev->sensor[0].calib_data_point2 =
+		(((tsens_base2_data + tsens0_point2) << 2) | TSENS_BIT_APPEND);
+		tmdev->sensor[1].calib_data_point2 =
+		(((tsens_base2_data + tsens1_point2) << 2) | TSENS_BIT_APPEND);
+		tmdev->sensor[2].calib_data_point2 =
+		(((tsens_base2_data + tsens2_point2) << 2) | TSENS_BIT_APPEND);
+		tmdev->sensor[3].calib_data_point2 =
+		(((tsens_base2_data + tsens3_point2) << 2) | TSENS_BIT_APPEND);
+		tmdev->sensor[4].calib_data_point2 =
+		(((tsens_base2_data + tsens4_point2) << 2) | TSENS_BIT_APPEND);
+		tmdev->sensor[5].calib_data_point2 =
+		(((tsens_base2_data + tsens5_point2) << 2) | TSENS_BIT_APPEND);
+		tmdev->sensor[6].calib_data_point2 =
+		(((tsens_base2_data + tsens6_point2) << 2) | TSENS_BIT_APPEND);
+		tmdev->sensor[7].calib_data_point2 =
+		(((tsens_base2_data + tsens7_point2) << 2) | TSENS_BIT_APPEND);
+		tmdev->sensor[8].calib_data_point2 =
+		(((tsens_base2_data + tsens8_point2) << 2) | TSENS_BIT_APPEND);
+		tmdev->sensor[9].calib_data_point2 =
+		(((tsens_base2_data + tsens9_point2) << 2) | TSENS_BIT_APPEND);
+		tmdev->sensor[10].calib_data_point2 =
+		(((tsens_base2_data + tsens10_point2) << 2) | TSENS_BIT_APPEND);
+	}
+
+compute_intercept_slope:
 	for (i = 0; i < tmdev->tsens_num_sensor; i++) {
 		int32_t num = 0, den = 0;
-		num = TSENS_CAL_DEGC_POINT2 - TSENS_CAL_DEGC_POINT2;
-		den = tmdev->sensor[i].calib_data_point2 -
+		if (tsens_calibration_mode == TSENS_TWO_POINT_CALIB) {
+			num = TSENS_CAL_DEGC_POINT2 - TSENS_CAL_DEGC_POINT1;
+			den = tmdev->sensor[i].calib_data_point2 -
 					tmdev->sensor[i].calib_data_point1;
-		num *= tmdev->tsens_factor;
-		if (tsens_calibration_mode == TSENS_TWO_POINT_CALIB)
+			num *= tmdev->tsens_factor;
 			tmdev->sensor[i].slope_mul_tsens_factor = num/den;
+		}
 		tmdev->sensor[i].offset = (TSENS_CAL_DEGC_POINT1 *
 			tmdev->tsens_factor)
 			- (tmdev->sensor[i].calib_data_point1 *
@@ -615,7 +628,7 @@
 	}
 
 	rc = of_property_read_u32_array(of_node,
-		"qcom,slope", tsens_slope_data, tsens_num_sensors/sizeof(u32));
+		"qcom,slope", tsens_slope_data, tsens_num_sensors);
 	if (rc) {
 		dev_err(&pdev->dev, "invalid or missing property: tsens-slope\n");
 		return rc;
diff --git a/drivers/tty/smux_ctl.c b/drivers/tty/smux_ctl.c
index 0078b04..7e0e6f8 100644
--- a/drivers/tty/smux_ctl.c
+++ b/drivers/tty/smux_ctl.c
@@ -33,6 +33,7 @@
 #include <linux/smux.h>
 #include <linux/slab.h>
 #include <linux/debugfs.h>
+#include <linux/poll.h>
 
 #include <asm/ioctls.h>
 
@@ -753,6 +754,33 @@
 	return ret;
 }
 
+static unsigned int smux_ctl_poll(struct file *file, poll_table *wait)
+{
+	struct smux_ctl_dev *devp;
+	unsigned int mask = 0;
+	int readable;
+
+	devp = file->private_data;
+	if (!devp)
+		return -ENODEV;
+
+	SMUXCTL_DBG(SMUX_CTL_MODULE_NAME ": %s called on smuxctl%d\n",
+			__func__, devp->id);
+
+	poll_wait(file, &devp->read_wait_queue, wait);
+
+	readable = smux_ctl_readable(devp->id);
+	if (readable < 0) {
+		pr_err(SMUX_CTL_MODULE_NAME ": %s err%d during poll for smuxctl%d\n",
+			__func__, readable, devp->id);
+		mask = POLLERR;
+	} else if (readable) {
+		mask = POLLIN | POLLRDNORM;
+	}
+
+	return mask;
+}
+
 static const struct file_operations smux_ctl_fops = {
 	.owner = THIS_MODULE,
 	.open = smux_ctl_open,
@@ -760,6 +788,7 @@
 	.read = smux_ctl_read,
 	.write = smux_ctl_write,
 	.unlocked_ioctl = smux_ctl_ioctl,
+	.poll = smux_ctl_poll,
 };
 
 static void smux_ctl_reset_channel(struct smux_ctl_dev *devp)
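With the new .poll hook, user space can sleep on the smux control device until smux_ctl_readable() reports data. A minimal user-space sketch follows; the /dev/smuxctl0 node name is an assumption, not taken from this patch:

	/* User-space sketch; the /dev/smuxctl0 node name is an assumption. */
	#include <stdio.h>
	#include <fcntl.h>
	#include <poll.h>
	#include <unistd.h>

	int main(void)
	{
		struct pollfd pfd;
		int ret;

		pfd.fd = open("/dev/smuxctl0", O_RDONLY);
		if (pfd.fd < 0) {
			perror("open");
			return 1;
		}
		pfd.events = POLLIN;

		ret = poll(&pfd, 1, 5000);	/* wait up to 5s for readable data */
		if (ret > 0 && (pfd.revents & POLLIN))
			printf("smuxctl has data to read\n");
		else if (ret == 0)
			printf("timed out\n");

		close(pfd.fd);
		return 0;
	}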
diff --git a/drivers/usb/gadget/android.c b/drivers/usb/gadget/android.c
index 54c486e..49d7c0f 100644
--- a/drivers/usb/gadget/android.c
+++ b/drivers/usb/gadget/android.c
@@ -72,6 +72,7 @@
 #include "u_bam_data.c"
 #include "f_mbim.c"
 #include "f_qc_ecm.c"
+#include "f_qc_rndis.c"
 #include "u_qc_ether.c"
 #ifdef CONFIG_TARGET_CORE
 #include "f_tcm.c"
@@ -98,7 +99,7 @@
 	char *dev_name;
 	struct device_attribute **attributes;
 
-	/* for android_dev.enabled_functions */
+	/* for android_configuration.enabled_functions */
 	struct list_head enabled_list;
 
 	struct android_dev *android_dev;
@@ -126,8 +127,8 @@
 };
 
 struct android_dev {
+	const char *name;
 	struct android_usb_function **functions;
-	struct list_head enabled_functions;
 	struct usb_composite_dev *cdev;
 	struct device *dev;
 
@@ -142,9 +143,23 @@
 	struct pm_qos_request pm_qos_req_dma;
 	struct work_struct work;
 
+	/* A list of struct android_configuration */
+	struct list_head configs;
+	int configs_num;
+
+	/* A list node inside the android_dev_list */
 	struct list_head list_item;
 
-	struct usb_configuration config;
+};
+
+struct android_configuration {
+	struct usb_configuration usb_config;
+
+	/* A list of the functions supported by this config */
+	struct list_head enabled_functions;
+
+	/* A list node inside the struct android_dev.configs list */
+	struct list_head list_item;
 };
 
 static struct class *android_class;
@@ -153,6 +168,10 @@
 static int android_bind_config(struct usb_configuration *c);
 static void android_unbind_config(struct usb_configuration *c);
 static struct android_dev *cdev_to_android_dev(struct usb_composite_dev *cdev);
+static struct android_configuration *alloc_android_config
+						(struct android_dev *dev);
+static void free_android_config(struct android_dev *dev,
+				struct android_configuration *conf);
 
 /* string IDs are assigned dynamically */
 #define STRING_MANUFACTURER_IDX		0
@@ -295,13 +314,17 @@
 static void android_enable(struct android_dev *dev)
 {
 	struct usb_composite_dev *cdev = dev->cdev;
+	struct android_configuration *conf;
 
 	if (WARN_ON(!dev->disable_depth))
 		return;
 
 	if (--dev->disable_depth == 0) {
-		usb_add_config(cdev, &dev->config,
-					android_bind_config);
+
+		list_for_each_entry(conf, &dev->configs, list_item)
+			usb_add_config(cdev, &conf->usb_config,
+						android_bind_config);
+
 		usb_gadget_connect(cdev->gadget);
 	}
 }
@@ -309,12 +332,15 @@
 static void android_disable(struct android_dev *dev)
 {
 	struct usb_composite_dev *cdev = dev->cdev;
+	struct android_configuration *conf;
 
 	if (dev->disable_depth++ == 0) {
 		usb_gadget_disconnect(cdev->gadget);
 		/* Cancel pending control requests */
 		usb_ep_dequeue(cdev->gadget->ep0, cdev->req);
-		usb_remove_config(cdev, &dev->config);
+
+		list_for_each_entry(conf, &dev->configs, list_item)
+			usb_remove_config(cdev, &conf->usb_config);
 	}
 }
 
@@ -965,6 +991,7 @@
 struct rndis_function_config {
 	u8      ethaddr[ETH_ALEN];
 	u32     vendorID;
+	u8      max_pkt_per_xfer;
 	char	manufacturer[256];
 	/* "Wireless" RNDIS; auto-detected by Windows */
 	bool	wceis;
@@ -986,6 +1013,22 @@
 	f->config = NULL;
 }
 
+static int rndis_qc_function_init(struct android_usb_function *f,
+					struct usb_composite_dev *cdev)
+{
+	f->config = kzalloc(sizeof(struct rndis_function_config), GFP_KERNEL);
+	if (!f->config)
+		return -ENOMEM;
+
+	return rndis_qc_init();
+}
+
+static void rndis_qc_function_cleanup(struct android_usb_function *f)
+{
+	rndis_qc_cleanup();
+	kfree(f->config);
+}
+
 static int
 rndis_function_bind_config(struct android_usb_function *f,
 		struct usb_configuration *c)
@@ -1024,12 +1067,56 @@
 					   rndis->manufacturer);
 }
 
+static int rndis_qc_function_bind_config(struct android_usb_function *f,
+					struct usb_configuration *c)
+{
+	int ret;
+	struct rndis_function_config *rndis = f->config;
+
+	if (!rndis) {
+		pr_err("%s: rndis function config is NULL\n", __func__);
+		return -EINVAL;
+	}
+
+	pr_info("%s MAC: %02X:%02X:%02X:%02X:%02X:%02X\n", __func__,
+		rndis->ethaddr[0], rndis->ethaddr[1], rndis->ethaddr[2],
+		rndis->ethaddr[3], rndis->ethaddr[4], rndis->ethaddr[5]);
+
+	ret = gether_qc_setup_name(c->cdev->gadget, rndis->ethaddr, "rndis");
+	if (ret) {
+		pr_err("%s: gether_setup failed\n", __func__);
+		return ret;
+	}
+
+	if (rndis->wceis) {
+		/* "Wireless" RNDIS; auto-detected by Windows */
+		rndis_qc_iad_descriptor.bFunctionClass =
+						USB_CLASS_WIRELESS_CONTROLLER;
+		rndis_qc_iad_descriptor.bFunctionSubClass = 0x01;
+		rndis_qc_iad_descriptor.bFunctionProtocol = 0x03;
+		rndis_qc_control_intf.bInterfaceClass =
+						USB_CLASS_WIRELESS_CONTROLLER;
+		rndis_qc_control_intf.bInterfaceSubClass =	 0x01;
+		rndis_qc_control_intf.bInterfaceProtocol =	 0x03;
+	}
+
+	return rndis_qc_bind_config_vendor(c, rndis->ethaddr, rndis->vendorID,
+				    rndis->manufacturer,
+					rndis->max_pkt_per_xfer);
+}
+
 static void rndis_function_unbind_config(struct android_usb_function *f,
 						struct usb_configuration *c)
 {
 	gether_cleanup();
 }
 
+static void rndis_qc_function_unbind_config(struct android_usb_function *f,
+						struct usb_configuration *c)
+{
+	gether_qc_cleanup();
+}
+
 static ssize_t rndis_manufacturer_show(struct device *dev,
 		struct device_attribute *attr, char *buf)
 {
@@ -1136,11 +1223,38 @@
 static DEVICE_ATTR(vendorID, S_IRUGO | S_IWUSR, rndis_vendorID_show,
 						rndis_vendorID_store);
 
+static ssize_t rndis_max_pkt_per_xfer_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct android_usb_function *f = dev_get_drvdata(dev);
+	struct rndis_function_config *config = f->config;
+	return snprintf(buf, PAGE_SIZE, "%d\n", config->max_pkt_per_xfer);
+}
+
+static ssize_t rndis_max_pkt_per_xfer_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t size)
+{
+	struct android_usb_function *f = dev_get_drvdata(dev);
+	struct rndis_function_config *config = f->config;
+	int value;
+
+	if (sscanf(buf, "%d", &value) == 1) {
+		config->max_pkt_per_xfer = value;
+		return size;
+	}
+	return -EINVAL;
+}
+
+static DEVICE_ATTR(max_pkt_per_xfer, S_IRUGO | S_IWUSR,
+				   rndis_max_pkt_per_xfer_show,
+				   rndis_max_pkt_per_xfer_store);
+
 static struct device_attribute *rndis_function_attributes[] = {
 	&dev_attr_manufacturer,
 	&dev_attr_wceis,
 	&dev_attr_ethaddr,
 	&dev_attr_vendorID,
+	&dev_attr_max_pkt_per_xfer,
 	NULL
 };
 
@@ -1153,6 +1267,14 @@
 	.attributes	= rndis_function_attributes,
 };
 
+static struct android_usb_function rndis_qc_function = {
+	.name		= "rndis_qc",
+	.init		= rndis_qc_function_init,
+	.cleanup	= rndis_qc_function_cleanup,
+	.bind_config	= rndis_qc_function_bind_config,
+	.unbind_config	= rndis_qc_function_unbind_config,
+	.attributes	= rndis_function_attributes,
+};
 
 struct mass_storage_function_config {
 	struct fsg_config fsg;
@@ -1356,6 +1478,7 @@
 	&mtp_function,
 	&ptp_function,
 	&rndis_function,
+	&rndis_qc_function,
 	&mass_storage_function,
 	&accessory_function,
 	&uasp_function,
@@ -1457,9 +1580,11 @@
 			       struct usb_configuration *c)
 {
 	struct android_usb_function *f;
+	struct android_configuration *conf =
+		container_of(c, struct android_configuration, usb_config);
 	int ret;
 
-	list_for_each_entry(f, &dev->enabled_functions, enabled_list) {
+	list_for_each_entry(f, &conf->enabled_functions, enabled_list) {
 		ret = f->bind_config(f, c);
 		if (ret) {
 			pr_err("%s: %s failed", __func__, f->name);
@@ -1474,25 +1599,30 @@
 			       struct usb_configuration *c)
 {
 	struct android_usb_function *f;
+	struct android_configuration *conf =
+		container_of(c, struct android_configuration, usb_config);
 
-	list_for_each_entry(f, &dev->enabled_functions, enabled_list) {
+	list_for_each_entry(f, &conf->enabled_functions, enabled_list) {
 		if (f->unbind_config)
 			f->unbind_config(f, c);
 	}
 }
 
-static int android_enable_function(struct android_dev *dev, char *name)
+static int android_enable_function(struct android_dev *dev,
+				   struct android_configuration *conf,
+				   char *name)
 {
 	struct android_usb_function **functions = dev->functions;
 	struct android_usb_function *f;
 	while ((f = *functions++)) {
 		if (!strcmp(name, f->name)) {
 			if (f->android_dev)
-				pr_err("%s cannot be enabled on two devices\n",
+				pr_err("%s already enabled in other "
+					"configuration or device\n",
 					f->name);
 			else {
 				list_add_tail(&f->enabled_list,
-					      &dev->enabled_functions);
+					      &conf->enabled_functions);
 				f->android_dev = dev;
 				return 0;
 			}
@@ -1508,9 +1638,20 @@
 		struct device_attribute *attr, char *buf)
 {
 	struct android_dev *dev = dev_get_drvdata(pdev);
+	struct android_configuration *conf;
+
+	/*
+	 * Show the wakeup attribute of the first configuration,
+	 * since all configurations have the same wakeup attribute
+	 */
+	if (dev->configs_num == 0)
+		return 0;
+	conf = list_entry(dev->configs.next,
+			  struct android_configuration,
+			  list_item);
 
 	return snprintf(buf, PAGE_SIZE, "%d\n",
-			!!(dev->config.bmAttributes &
+			!!(conf->usb_config.bmAttributes &
 				USB_CONFIG_ATT_WAKEUP));
 }
 
@@ -1518,6 +1659,7 @@
 		struct device_attribute *attr, const char *buff, size_t size)
 {
 	struct android_dev *dev = dev_get_drvdata(pdev);
+	struct android_configuration *conf;
 	int enable = 0;
 
 	sscanf(buff, "%d", &enable);
@@ -1525,10 +1667,13 @@
 	pr_debug("android_usb: %s remote wakeup\n",
 			enable ? "enabling" : "disabling");
 
-	if (enable)
-		dev->config.bmAttributes |= USB_CONFIG_ATT_WAKEUP;
-	else
-		dev->config.bmAttributes &= ~USB_CONFIG_ATT_WAKEUP;
+	list_for_each_entry(conf, &dev->configs, list_item)
+		if (enable)
+			conf->usb_config.bmAttributes |=
+					USB_CONFIG_ATT_WAKEUP;
+		else
+			conf->usb_config.bmAttributes &=
+					~USB_CONFIG_ATT_WAKEUP;
 
 	return size;
 }
@@ -1537,13 +1682,18 @@
 functions_show(struct device *pdev, struct device_attribute *attr, char *buf)
 {
 	struct android_dev *dev = dev_get_drvdata(pdev);
+	struct android_configuration *conf;
 	struct android_usb_function *f;
 	char *buff = buf;
 
 	mutex_lock(&dev->mutex);
 
-	list_for_each_entry(f, &dev->enabled_functions, enabled_list)
-		buff += snprintf(buff, PAGE_SIZE, "%s,", f->name);
+	list_for_each_entry(conf, &dev->configs, list_item) {
+		if (buff != buf)
+			*(buff-1) = ':';
+		list_for_each_entry(f, &conf->enabled_functions, enabled_list)
+			buff += snprintf(buff, PAGE_SIZE, "%s,", f->name);
+	}
 
 	mutex_unlock(&dev->mutex);
 
@@ -1558,6 +1708,9 @@
 {
 	struct android_dev *dev = dev_get_drvdata(pdev);
 	struct android_usb_function *f;
+	struct list_head *curr_conf = &dev->configs;
+	struct android_configuration *conf;
+	char *conf_str;
 	char *name;
 	char buf[256], *b;
 	int err;
@@ -1570,21 +1723,45 @@
 	}
 
 	/* Clear previous enabled list */
-	list_for_each_entry(f, &dev->enabled_functions, enabled_list) {
-		f->android_dev = NULL;
+	list_for_each_entry(conf, &dev->configs, list_item) {
+		list_for_each_entry(f, &conf->enabled_functions, enabled_list)
+			f->android_dev = NULL;
+		INIT_LIST_HEAD(&conf->enabled_functions);
 	}
-	INIT_LIST_HEAD(&dev->enabled_functions);
 
 	strlcpy(buf, buff, sizeof(buf));
 	b = strim(buf);
 
 	while (b) {
-		name = strsep(&b, ",");
-		if (name) {
-			err = android_enable_function(dev, name);
-			if (err)
-				pr_err("android_usb: Cannot enable '%s'", name);
+		conf_str = strsep(&b, ":");
+		if (conf_str) {
+			/* If the next entry is not the list head, take it */
+			if (curr_conf->next != &dev->configs)
+				conf = list_entry(curr_conf->next,
+						  struct android_configuration,
+						  list_item);
+			else
+				conf = alloc_android_config(dev);
+
+			curr_conf = curr_conf->next;
 		}
+
+		while (conf_str) {
+			name = strsep(&conf_str, ",");
+			if (name) {
+				err = android_enable_function(dev, conf, name);
+				if (err)
+					pr_err("android_usb: Cannot enable %s",
+						name);
+			}
+		}
+	}
+
+	/* Free unneeded configurations, if any */
+	while (curr_conf->next != &dev->configs) {
+		conf = list_entry(curr_conf->next,
+				  struct android_configuration, list_item);
+		free_android_config(dev, conf);
 	}
 
 	mutex_unlock(&dev->mutex);
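functions_store() now accepts several configurations in one write: configurations are separated by ':' and the functions inside each configuration by ','. A user-space sketch of writing such a value; the function names do appear in supported_functions, while the sysfs path is an assumption (the conventional android_usb location):

	/*
	 * User-space sketch of the new functions syntax, e.g. two
	 * configurations: "mtp,mass_storage" and "rndis".
	 */
	#include <stdio.h>
	#include <string.h>

	int main(void)
	{
		const char *path = "/sys/class/android_usb/android0/functions";
		const char *val = "mtp,mass_storage:rndis";	/* two configurations */
		FILE *f = fopen(path, "w");

		if (!f) {
			perror("fopen");
			return 1;
		}
		fwrite(val, 1, strlen(val), f);
		fclose(f);
		return 0;
	}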
@@ -1606,6 +1783,7 @@
 	struct android_dev *dev = dev_get_drvdata(pdev);
 	struct usb_composite_dev *cdev = dev->cdev;
 	struct android_usb_function *f;
+	struct android_configuration *conf;
 	int enabled = 0;
 
 	if (!cdev)
@@ -1625,18 +1803,22 @@
 		cdev->desc.bDeviceClass = device_desc.bDeviceClass;
 		cdev->desc.bDeviceSubClass = device_desc.bDeviceSubClass;
 		cdev->desc.bDeviceProtocol = device_desc.bDeviceProtocol;
-		list_for_each_entry(f, &dev->enabled_functions, enabled_list) {
-			if (f->enable)
-				f->enable(f);
-		}
+		list_for_each_entry(conf, &dev->configs, list_item)
+			list_for_each_entry(f, &conf->enabled_functions,
+						enabled_list) {
+				if (f->enable)
+					f->enable(f);
+			}
 		android_enable(dev);
 		dev->enabled = true;
 	} else if (!enabled && dev->enabled) {
 		android_disable(dev);
-		list_for_each_entry(f, &dev->enabled_functions, enabled_list) {
-			if (f->disable)
-				f->disable(f);
-		}
+		list_for_each_entry(conf, &dev->configs, list_item)
+			list_for_each_entry(f, &conf->enabled_functions,
+						enabled_list) {
+				if (f->disable)
+					f->disable(f);
+			}
 		dev->enabled = false;
 	} else {
 		pr_err("android_usb: already %s\n",
@@ -1792,6 +1974,7 @@
 {
 	struct android_dev *dev;
 	struct usb_gadget	*gadget = cdev->gadget;
+	struct android_configuration *conf;
 	int			gcnum, id, ret;
 
 	/* Bind to the last android_dev that was probed */
@@ -1840,7 +2023,8 @@
 	device_desc.iSerialNumber = id;
 
 	if (gadget_is_otg(cdev->gadget))
-		dev->config.descriptors = otg_desc;
+		list_for_each_entry(conf, &dev->configs, list_item)
+			conf->usb_config.descriptors = otg_desc;
 
 	gcnum = usb_gadget_controller_number(gadget);
 	if (gcnum >= 0)
@@ -1881,6 +2065,7 @@
 	struct android_dev		*dev = cdev_to_android_dev(cdev);
 	struct usb_request		*req = cdev->req;
 	struct android_usb_function	*f;
+	struct android_configuration	*conf;
 	int value = -EOPNOTSUPP;
 	unsigned long flags;
 
@@ -1889,13 +2074,16 @@
 	req->length = 0;
 	gadget->ep0->driver_data = cdev;
 
-	list_for_each_entry(f, &dev->enabled_functions, enabled_list) {
-		if (f->ctrlrequest) {
-			value = f->ctrlrequest(f, cdev, c);
-			if (value >= 0)
-				break;
-		}
-	}
+	list_for_each_entry(conf, &dev->configs, list_item)
+		if (&conf->usb_config == cdev->config)
+			list_for_each_entry(f,
+					    &conf->enabled_functions,
+					    enabled_list)
+				if (f->ctrlrequest) {
+					value = f->ctrlrequest(f, cdev, c);
+					if (value >= 0)
+						break;
+				}
 
 	/* Special case the accessory function.
 	 * It needs to handle control requests before it is enabled.
@@ -1986,6 +2174,38 @@
 	return dev;
 }
 
+static struct android_configuration *alloc_android_config
+						(struct android_dev *dev)
+{
+	struct android_configuration *conf;
+
+	conf = kzalloc(sizeof(*conf), GFP_KERNEL);
+	if (!conf) {
+		pr_err("%s(): Failed to alloc memory for android conf\n",
+			__func__);
+		return ERR_PTR(-ENOMEM);
+	}
+
+	dev->configs_num++;
+	conf->usb_config.label = dev->name;
+	conf->usb_config.unbind = android_unbind_config;
+	conf->usb_config.bConfigurationValue = dev->configs_num;
+
+	INIT_LIST_HEAD(&conf->enabled_functions);
+
+	list_add_tail(&conf->list_item, &dev->configs);
+
+	return conf;
+}
+
+static void free_android_config(struct android_dev *dev,
+			     struct android_configuration *conf)
+{
+	list_del(&conf->list_item);
+	dev->configs_num--;
+	kfree(conf);
+}
+
 static int __devinit android_probe(struct platform_device *pdev)
 {
 	struct android_usb_platform_data *pdata = pdev->dev.platform_data;
@@ -2006,12 +2226,11 @@
 		goto err_alloc;
 	}
 
-	android_dev->config.label = pdev->name;
-	android_dev->config.unbind = android_unbind_config;
-	android_dev->config.bConfigurationValue = 1;
+	android_dev->name = pdev->name;
 	android_dev->disable_depth = 1;
 	android_dev->functions = supported_functions;
-	INIT_LIST_HEAD(&android_dev->enabled_functions);
+	android_dev->configs_num = 0;
+	INIT_LIST_HEAD(&android_dev->configs);
 	INIT_WORK(&android_dev->work, android_work);
 	mutex_init(&android_dev->mutex);
 
diff --git a/drivers/usb/gadget/ci13xxx_udc.c b/drivers/usb/gadget/ci13xxx_udc.c
index 4d15d4d..4d15c55 100644
--- a/drivers/usb/gadget/ci13xxx_udc.c
+++ b/drivers/usb/gadget/ci13xxx_udc.c
@@ -54,6 +54,7 @@
 #include <linux/dmapool.h>
 #include <linux/dma-mapping.h>
 #include <linux/init.h>
+#include <linux/ratelimit.h>
 #include <linux/interrupt.h>
 #include <linux/io.h>
 #include <linux/irq.h>
@@ -74,6 +75,7 @@
  *****************************************************************************/
 
 #define DMA_ADDR_INVALID	(~(dma_addr_t)0)
+#define ATDTW_SET_DELAY		100 /* 100msec delay */
 
 /* ctrl register bank access */
 static DEFINE_SPINLOCK(udc_lock);
@@ -1764,6 +1766,7 @@
 		struct ci13xxx_req *mReqPrev;
 		int n = hw_ep_bit(mEp->num, mEp->dir);
 		int tmp_stat;
+		ktime_t start, diff;
 
 		mReqPrev = list_entry(mEp->qh.queue.prev,
 				struct ci13xxx_req, queue);
@@ -1774,9 +1777,20 @@
 		wmb();
 		if (hw_cread(CAP_ENDPTPRIME, BIT(n)))
 			goto done;
+		start = ktime_get();
 		do {
 			hw_cwrite(CAP_USBCMD, USBCMD_ATDTW, USBCMD_ATDTW);
 			tmp_stat = hw_cread(CAP_ENDPTSTAT, BIT(n));
+			diff = ktime_sub(ktime_get(), start);
+			/* poll for max. 100ms */
+			if (ktime_to_ms(diff) > ATDTW_SET_DELAY) {
+				if (hw_cread(CAP_USBCMD, USBCMD_ATDTW))
+					break;
+				printk_ratelimited(KERN_ERR
+				"%s:queue failed ep#%d %s\n",
+				 __func__, mEp->num, mEp->dir ? "IN" : "OUT");
+				return -EAGAIN;
+			}
 		} while (!hw_cread(CAP_USBCMD, USBCMD_ATDTW));
 		hw_cwrite(CAP_USBCMD, USBCMD_ATDTW, 0);
 		if (tmp_stat)
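The ATDTW retry loop above bounds a busy-wait on hardware with ktime_get()/ktime_sub()/ktime_to_ms() and gives up with -EAGAIN after ATDTW_SET_DELAY. The same pattern in isolation, as a kernel-style sketch in which read_hw_bit() is a placeholder rather than a real register accessor:

	/*
	 * Generic bounded-poll sketch using the same ktime pattern as the
	 * ATDTW loop; read_hw_bit() is a placeholder, not a real accessor.
	 */
	#include <linux/errno.h>
	#include <linux/ktime.h>
	#include <linux/types.h>

	#define POLL_LIMIT_MS	100

	static bool read_hw_bit(void)
	{
		return true;	/* placeholder: substitute the real register read */
	}

	static int poll_bit_bounded(void)
	{
		ktime_t start = ktime_get();

		while (!read_hw_bit()) {
			ktime_t diff = ktime_sub(ktime_get(), start);

			if (ktime_to_ms(diff) > POLL_LIMIT_MS)
				return -EAGAIN;	/* stop instead of spinning forever */
		}
		return 0;
	}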
diff --git a/drivers/usb/gadget/f_qc_rndis.c b/drivers/usb/gadget/f_qc_rndis.c
new file mode 100644
index 0000000..dcf307d
--- /dev/null
+++ b/drivers/usb/gadget/f_qc_rndis.c
@@ -0,0 +1,1151 @@
+/*
+ * f_qc_rndis.c -- RNDIS link function driver
+ *
+ * Copyright (C) 2003-2005,2008 David Brownell
+ * Copyright (C) 2003-2004 Robert Schwebel, Benedikt Spranger
+ * Copyright (C) 2008 Nokia Corporation
+ * Copyright (C) 2009 Samsung Electronics
+ *			Author: Michal Nazarewicz (mina86@mina86.com)
+ * Copyright (c) 2012, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ */
+
+/* #define VERBOSE_DEBUG */
+
+#include <linux/slab.h>
+#include <linux/kernel.h>
+#include <linux/device.h>
+#include <linux/etherdevice.h>
+
+#include <linux/atomic.h>
+
+#include "u_ether.h"
+#include "u_qc_ether.h"
+#include "rndis.h"
+
+
+/*
+ * This function is an RNDIS Ethernet port -- a Microsoft protocol that's
+ * been promoted instead of the standard CDC Ethernet.  The published RNDIS
+ * spec is ambiguous, incomplete, and needlessly complex.  Variants such as
+ * ActiveSync have even worse status in terms of specification.
+ *
+ * In short:  it's a protocol controlled by (and for) Microsoft, not for an
+ * Open ecosystem or markets.  Linux supports it *only* because Microsoft
+ * doesn't support the CDC Ethernet standard.
+ *
+ * The RNDIS data transfer model is complex, with multiple Ethernet packets
+ * per USB message, and out of band data.  The control model is built around
+ * what's essentially an "RNDIS RPC" protocol.  It's all wrapped in a CDC ACM
+ * (modem, not Ethernet) veneer, with those ACM descriptors being entirely
+ * useless (they're ignored).  RNDIS expects to be the only function in its
+ * configuration, so it's no real help if you need composite devices; and
+ * it expects to be the first configuration too.
+ *
+ * There is a single technical advantage of RNDIS over CDC Ethernet, if you
+ * discount the fluff that its RPC can be made to deliver: it doesn't need
+ * a NOP altsetting for the data interface.  That lets it work on some of the
+ * "so smart it's stupid" hardware which takes over configuration changes
+ * from the software, and adds restrictions like "no altsettings".
+ *
+ * Unfortunately MSFT's RNDIS drivers are buggy.  They hang or oops, and
+ * have all sorts of contrary-to-specification oddities that can prevent
+ * them from working sanely.  Since bugfixes (or accurate specs, letting
+ * Linux work around those bugs) are unlikely to ever come from MSFT, you
+ * may want to avoid using RNDIS on purely operational grounds.
+ *
+ * Omissions from the RNDIS 1.0 specification include:
+ *
+ *   - Power management ... references data that's scattered around lots
+ *     of other documentation, which is incorrect/incomplete there too.
+ *
+ *   - There are various undocumented protocol requirements, like the need
+ *     to send garbage in some control-OUT messages.
+ *
+ *   - MS-Windows drivers sometimes emit undocumented requests.
+ *
+ * This function is based on RNDIS link function driver and
+ * contains MSM specific implementation.
+ */
+
+struct f_rndis_qc {
+	struct qc_gether			port;
+	u8				ctrl_id, data_id;
+	u8				ethaddr[ETH_ALEN];
+	u32				vendorID;
+	u8				max_pkt_per_xfer;
+	const char			*manufacturer;
+	int				config;
+	atomic_t		ioctl_excl;
+	atomic_t		open_excl;
+
+	struct usb_ep			*notify;
+	struct usb_request		*notify_req;
+	atomic_t			notify_count;
+};
+
+static inline struct f_rndis_qc *func_to_rndis_qc(struct usb_function *f)
+{
+	return container_of(f, struct f_rndis_qc, port.func);
+}
+
+/* peak (theoretical) bulk transfer rate in bits-per-second */
+static unsigned int rndis_qc_bitrate(struct usb_gadget *g)
+{
+	if (gadget_is_superspeed(g) && g->speed == USB_SPEED_SUPER)
+		return 13 * 1024 * 8 * 1000 * 8;
+	else if (gadget_is_dualspeed(g) && g->speed == USB_SPEED_HIGH)
+		return 13 * 512 * 8 * 1000 * 8;
+	else
+		return 19 * 64 * 1 * 1000 * 8;
+}
+
+/*-------------------------------------------------------------------------*/
+
+#define RNDIS_QC_LOG2_STATUS_INTERVAL_MSEC	5	/* 1 << 5 == 32 msec */
+#define RNDIS_QC_STATUS_BYTECOUNT		8	/* 8 bytes data */
+
+/* currently only one rndis instance is supported */
+#define RNDIS_QC_NO_PORTS					1
+
+/* default max packets per transfer value */
+#define DEFAULT_MAX_PKT_PER_XFER			15
+
+
+#define RNDIS_QC_IOCTL_MAGIC		'i'
+#define RNDIS_QC_GET_MAX_PKT_PER_XFER   _IOR(RNDIS_QC_IOCTL_MAGIC, 1, u8)
+
+
+/* interface descriptor: */
+
+static struct usb_interface_descriptor rndis_qc_control_intf = {
+	.bLength =		sizeof rndis_qc_control_intf,
+	.bDescriptorType =	USB_DT_INTERFACE,
+
+	/* .bInterfaceNumber = DYNAMIC */
+	/* status endpoint is optional; this could be patched later */
+	.bNumEndpoints =	1,
+	.bInterfaceClass =	USB_CLASS_COMM,
+	.bInterfaceSubClass =   USB_CDC_SUBCLASS_ACM,
+	.bInterfaceProtocol =   USB_CDC_ACM_PROTO_VENDOR,
+	/* .iInterface = DYNAMIC */
+};
+
+static struct usb_cdc_header_desc rndis_qc_header_desc = {
+	.bLength =		sizeof rndis_qc_header_desc,
+	.bDescriptorType =	USB_DT_CS_INTERFACE,
+	.bDescriptorSubType =	USB_CDC_HEADER_TYPE,
+
+	.bcdCDC =		cpu_to_le16(0x0110),
+};
+
+static struct usb_cdc_call_mgmt_descriptor rndis_qc_call_mgmt_descriptor = {
+	.bLength =		sizeof rndis_qc_call_mgmt_descriptor,
+	.bDescriptorType =	USB_DT_CS_INTERFACE,
+	.bDescriptorSubType =	USB_CDC_CALL_MANAGEMENT_TYPE,
+
+	.bmCapabilities =	0x00,
+	.bDataInterface =	0x01,
+};
+
+static struct usb_cdc_acm_descriptor rndis_qc_acm_descriptor = {
+	.bLength =		sizeof rndis_qc_acm_descriptor,
+	.bDescriptorType =	USB_DT_CS_INTERFACE,
+	.bDescriptorSubType =	USB_CDC_ACM_TYPE,
+
+	.bmCapabilities =	0x00,
+};
+
+static struct usb_cdc_union_desc rndis_qc_union_desc = {
+	.bLength =		sizeof(rndis_qc_union_desc),
+	.bDescriptorType =	USB_DT_CS_INTERFACE,
+	.bDescriptorSubType =	USB_CDC_UNION_TYPE,
+	/* .bMasterInterface0 =	DYNAMIC */
+	/* .bSlaveInterface0 =	DYNAMIC */
+};
+
+/* the data interface has two bulk endpoints */
+
+static struct usb_interface_descriptor rndis_qc_data_intf = {
+	.bLength =		sizeof rndis_qc_data_intf,
+	.bDescriptorType =	USB_DT_INTERFACE,
+
+	/* .bInterfaceNumber = DYNAMIC */
+	.bNumEndpoints =	2,
+	.bInterfaceClass =	USB_CLASS_CDC_DATA,
+	.bInterfaceSubClass =	0,
+	.bInterfaceProtocol =	0,
+	/* .iInterface = DYNAMIC */
+};
+
+
+static struct usb_interface_assoc_descriptor
+rndis_qc_iad_descriptor = {
+	.bLength =		sizeof rndis_qc_iad_descriptor,
+	.bDescriptorType =	USB_DT_INTERFACE_ASSOCIATION,
+	.bFirstInterface =	0, /* XXX, hardcoded */
+	.bInterfaceCount =	2, /* control + data */
+	.bFunctionClass =	USB_CLASS_COMM,
+	.bFunctionSubClass =	USB_CDC_SUBCLASS_ETHERNET,
+	.bFunctionProtocol =	USB_CDC_PROTO_NONE,
+	/* .iFunction = DYNAMIC */
+};
+
+/* full speed support: */
+
+static struct usb_endpoint_descriptor rndis_qc_fs_notify_desc = {
+	.bLength =		USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType =	USB_DT_ENDPOINT,
+
+	.bEndpointAddress =	USB_DIR_IN,
+	.bmAttributes =		USB_ENDPOINT_XFER_INT,
+	.wMaxPacketSize =	cpu_to_le16(RNDIS_QC_STATUS_BYTECOUNT),
+	.bInterval =		1 << RNDIS_QC_LOG2_STATUS_INTERVAL_MSEC,
+};
+
+static struct usb_endpoint_descriptor rndis_qc_fs_in_desc = {
+	.bLength =		USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType =	USB_DT_ENDPOINT,
+
+	.bEndpointAddress =	USB_DIR_IN,
+	.bmAttributes =		USB_ENDPOINT_XFER_BULK,
+};
+
+static struct usb_endpoint_descriptor rndis_qc_fs_out_desc = {
+	.bLength =		USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType =	USB_DT_ENDPOINT,
+
+	.bEndpointAddress =	USB_DIR_OUT,
+	.bmAttributes =		USB_ENDPOINT_XFER_BULK,
+};
+
+static struct usb_descriptor_header *eth_qc_fs_function[] = {
+	(struct usb_descriptor_header *) &rndis_qc_iad_descriptor,
+	/* control interface matches ACM, not Ethernet */
+	(struct usb_descriptor_header *) &rndis_qc_control_intf,
+	(struct usb_descriptor_header *) &rndis_qc_header_desc,
+	(struct usb_descriptor_header *) &rndis_qc_call_mgmt_descriptor,
+	(struct usb_descriptor_header *) &rndis_qc_acm_descriptor,
+	(struct usb_descriptor_header *) &rndis_qc_union_desc,
+	(struct usb_descriptor_header *) &rndis_qc_fs_notify_desc,
+	/* data interface has no altsetting */
+	(struct usb_descriptor_header *) &rndis_qc_data_intf,
+	(struct usb_descriptor_header *) &rndis_qc_fs_in_desc,
+	(struct usb_descriptor_header *) &rndis_qc_fs_out_desc,
+	NULL,
+};
+
+/* high speed support: */
+
+static struct usb_endpoint_descriptor rndis_qc_hs_notify_desc = {
+	.bLength =		USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType =	USB_DT_ENDPOINT,
+
+	.bEndpointAddress =	USB_DIR_IN,
+	.bmAttributes =		USB_ENDPOINT_XFER_INT,
+	.wMaxPacketSize =	cpu_to_le16(RNDIS_QC_STATUS_BYTECOUNT),
+	.bInterval =		RNDIS_QC_LOG2_STATUS_INTERVAL_MSEC + 4,
+};
+static struct usb_endpoint_descriptor rndis_qc_hs_in_desc = {
+	.bLength =		USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType =	USB_DT_ENDPOINT,
+
+	.bEndpointAddress =	USB_DIR_IN,
+	.bmAttributes =		USB_ENDPOINT_XFER_BULK,
+	.wMaxPacketSize =	cpu_to_le16(512),
+};
+
+static struct usb_endpoint_descriptor rndis_qc_hs_out_desc = {
+	.bLength =		USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType =	USB_DT_ENDPOINT,
+
+	.bEndpointAddress =	USB_DIR_OUT,
+	.bmAttributes =		USB_ENDPOINT_XFER_BULK,
+	.wMaxPacketSize =	cpu_to_le16(512),
+};
+
+static struct usb_descriptor_header *eth_qc_hs_function[] = {
+	(struct usb_descriptor_header *) &rndis_qc_iad_descriptor,
+	/* control interface matches ACM, not Ethernet */
+	(struct usb_descriptor_header *) &rndis_qc_control_intf,
+	(struct usb_descriptor_header *) &rndis_qc_header_desc,
+	(struct usb_descriptor_header *) &rndis_qc_call_mgmt_descriptor,
+	(struct usb_descriptor_header *) &rndis_qc_acm_descriptor,
+	(struct usb_descriptor_header *) &rndis_qc_union_desc,
+	(struct usb_descriptor_header *) &rndis_qc_hs_notify_desc,
+	/* data interface has no altsetting */
+	(struct usb_descriptor_header *) &rndis_qc_data_intf,
+	(struct usb_descriptor_header *) &rndis_qc_hs_in_desc,
+	(struct usb_descriptor_header *) &rndis_qc_hs_out_desc,
+	NULL,
+};
+
+/* super speed support: */
+
+static struct usb_endpoint_descriptor rndis_qc_ss_notify_desc = {
+	.bLength =		USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType =	USB_DT_ENDPOINT,
+
+	.bEndpointAddress =	USB_DIR_IN,
+	.bmAttributes =		USB_ENDPOINT_XFER_INT,
+	.wMaxPacketSize =	cpu_to_le16(RNDIS_QC_STATUS_BYTECOUNT),
+	.bInterval =		RNDIS_QC_LOG2_STATUS_INTERVAL_MSEC + 4,
+};
+
+static struct usb_ss_ep_comp_descriptor rndis_qc_ss_intr_comp_desc = {
+	.bLength =		sizeof rndis_qc_ss_intr_comp_desc,
+	.bDescriptorType =	USB_DT_SS_ENDPOINT_COMP,
+
+	/* the following 3 values can be tweaked if necessary */
+	/* .bMaxBurst =		0, */
+	/* .bmAttributes =	0, */
+	.wBytesPerInterval =	cpu_to_le16(STATUS_BYTECOUNT),
+};
+
+static struct usb_endpoint_descriptor rndis_qc_ss_in_desc = {
+	.bLength =		USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType =	USB_DT_ENDPOINT,
+
+	.bEndpointAddress =	USB_DIR_IN,
+	.bmAttributes =		USB_ENDPOINT_XFER_BULK,
+	.wMaxPacketSize =	cpu_to_le16(1024),
+};
+
+static struct usb_endpoint_descriptor rndis_qc_ss_out_desc = {
+	.bLength =		USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType =	USB_DT_ENDPOINT,
+
+	.bEndpointAddress =	USB_DIR_OUT,
+	.bmAttributes =		USB_ENDPOINT_XFER_BULK,
+	.wMaxPacketSize =	cpu_to_le16(1024),
+};
+
+static struct usb_ss_ep_comp_descriptor rndis_qc_ss_bulk_comp_desc = {
+	.bLength =		sizeof rndis_qc_ss_bulk_comp_desc,
+	.bDescriptorType =	USB_DT_SS_ENDPOINT_COMP,
+
+	/* the following 2 values can be tweaked if necessary */
+	/* .bMaxBurst =		0, */
+	/* .bmAttributes =	0, */
+};
+
+static struct usb_descriptor_header *eth_qc_ss_function[] = {
+	(struct usb_descriptor_header *) &rndis_qc_iad_descriptor,
+
+	/* control interface matches ACM, not Ethernet */
+	(struct usb_descriptor_header *) &rndis_qc_control_intf,
+	(struct usb_descriptor_header *) &rndis_qc_header_desc,
+	(struct usb_descriptor_header *) &rndis_qc_call_mgmt_descriptor,
+	(struct usb_descriptor_header *) &rndis_qc_acm_descriptor,
+	(struct usb_descriptor_header *) &rndis_qc_union_desc,
+	(struct usb_descriptor_header *) &rndis_qc_ss_notify_desc,
+	(struct usb_descriptor_header *) &rndis_qc_ss_intr_comp_desc,
+
+	/* data interface has no altsetting */
+	(struct usb_descriptor_header *) &rndis_qc_data_intf,
+	(struct usb_descriptor_header *) &rndis_qc_ss_in_desc,
+	(struct usb_descriptor_header *) &rndis_qc_ss_bulk_comp_desc,
+	(struct usb_descriptor_header *) &rndis_qc_ss_out_desc,
+	(struct usb_descriptor_header *) &rndis_qc_ss_bulk_comp_desc,
+	NULL,
+};
+
+/* string descriptors: */
+
+static struct usb_string rndis_qc_string_defs[] = {
+	[0].s = "RNDIS Communications Control",
+	[1].s = "RNDIS Ethernet Data",
+	[2].s = "RNDIS",
+	{  } /* end of list */
+};
+
+static struct usb_gadget_strings rndis_qc_string_table = {
+	.language =		0x0409,	/* en-us */
+	.strings =		rndis_qc_string_defs,
+};
+
+static struct usb_gadget_strings *rndis_qc_strings[] = {
+	&rndis_qc_string_table,
+	NULL,
+};
+
+struct f_rndis_qc *_rndis_qc;
+
+static inline int rndis_qc_lock(atomic_t *excl)
+{
+	if (atomic_inc_return(excl) == 1) {
+		return 0;
+	} else {
+		atomic_dec(excl);
+		return -EBUSY;
+	}
+}
+
+static inline void rndis_qc_unlock(atomic_t *excl)
+{
+	atomic_dec(excl);
+}
+
+/* MSM bam support */
+static struct data_port rndis_qc_bam_port;
+
+static int rndis_qc_bam_setup(void)
+{
+	int ret;
+
+	ret = bam_data_setup(RNDIS_QC_NO_PORTS);
+	if (ret) {
+		pr_err("bam_data_setup failed err: %d\n", ret);
+		return ret;
+	}
+
+	return 0;
+}
+
+static int rndis_qc_bam_connect(struct f_rndis_qc *dev)
+{
+	int ret;
+
+	rndis_qc_bam_port.func = dev->port.func;
+	rndis_qc_bam_port.in = dev->port.in_ep;
+	rndis_qc_bam_port.out = dev->port.out_ep;
+
+	/* currently we use the first connection */
+	ret = bam_data_connect(&rndis_qc_bam_port, 0, 0);
+	if (ret) {
+		pr_err("bam_data_connect failed: err:%d\n",
+				ret);
+		return ret;
+	} else {
+		pr_info("rndis bam connected\n");
+	}
+
+	return 0;
+}
+
+static int rndis_qc_bam_disconnect(struct f_rndis_qc *dev)
+{
+	pr_info("dev:%p. %s Do nothing.\n",
+			dev, __func__);
+
+	return 0;
+}
+
+/*-------------------------------------------------------------------------*/
+
+static struct sk_buff *rndis_qc_add_header(struct qc_gether *port,
+					struct sk_buff *skb)
+{
+	struct sk_buff *skb2;
+
+	skb2 = skb_realloc_headroom(skb, sizeof(struct rndis_packet_msg_type));
+	if (skb2)
+		rndis_add_hdr(skb2);
+
+	dev_kfree_skb_any(skb);
+	return skb2;
+}
+
+int rndis_qc_rm_hdr(struct qc_gether *port,
+			struct sk_buff *skb,
+			struct sk_buff_head *list)
+{
+	/* tmp points to a struct rndis_packet_msg_type */
+	__le32 *tmp = (void *)skb->data;
+
+	/* MessageType, MessageLength */
+	if (cpu_to_le32(REMOTE_NDIS_PACKET_MSG)
+			!= get_unaligned(tmp++)) {
+		dev_kfree_skb_any(skb);
+		return -EINVAL;
+	}
+	tmp++;
+
+	/* DataOffset, DataLength */
+	if (!skb_pull(skb, get_unaligned_le32(tmp++) + 8)) {
+		dev_kfree_skb_any(skb);
+		return -EOVERFLOW;
+	}
+	skb_trim(skb, get_unaligned_le32(tmp++));
+
+	skb_queue_tail(list, skb);
+	return 0;
+}
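+
+/* Editor's illustration (not part of the original change): the header that
+ * rndis_qc_rm_hdr() strips is the standard RNDIS data-packet header, roughly:
+ *
+ *	struct rndis_packet_msg_type {
+ *		__le32 MessageType;	// REMOTE_NDIS_PACKET_MSG (0x1)
+ *		__le32 MessageLength;
+ *		__le32 DataOffset;	// counted from the start of this field
+ *		__le32 DataLength;
+ *		...			// OOB / per-packet-info fields
+ *	};
+ *
+ * which is why the code above pulls DataOffset + 8 bytes (the two leading
+ * dwords) before trimming the skb down to DataLength.
+ */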
+
+
+static void rndis_qc_response_available(void *_rndis)
+{
+	struct f_rndis_qc			*rndis = _rndis;
+	struct usb_request		*req = rndis->notify_req;
+	__le32				*data = req->buf;
+	int				status;
+
+	if (atomic_inc_return(&rndis->notify_count) != 1)
+		return;
+
+	/* Send RNDIS RESPONSE_AVAILABLE notification; a
+	 * USB_CDC_NOTIFY_RESPONSE_AVAILABLE "should" work too
+	 *
+	 * This is the only notification defined by RNDIS.
+	 */
+	data[0] = cpu_to_le32(1);
+	data[1] = cpu_to_le32(0);
+
+	status = usb_ep_queue(rndis->notify, req, GFP_ATOMIC);
+	if (status) {
+		atomic_dec(&rndis->notify_count);
+		pr_info("notify/0 --> %d\n", status);
+	}
+}
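+
+/* Editor's illustration (not part of the original change): the 8-byte buffer
+ * queued above is the RNDIS RESPONSE_AVAILABLE notification the host polls
+ * for on the interrupt endpoint:
+ *
+ *	data[0] = cpu_to_le32(1);	// dwNotification: RESPONSE_AVAILABLE
+ *	data[1] = cpu_to_le32(0);	// dwReserved
+ *
+ * On seeing it, the host issues USB_CDC_GET_ENCAPSULATED_RESPONSE on ep0,
+ * which is handled in rndis_qc_setup() below.
+ */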
+
+static void rndis_qc_response_complete(struct usb_ep *ep,
+						struct usb_request *req)
+{
+	struct f_rndis_qc			*rndis = req->context;
+	int				status = req->status;
+
+	/* after TX:
+	 *  - USB_CDC_GET_ENCAPSULATED_RESPONSE (ep0/control)
+	 *  - RNDIS_RESPONSE_AVAILABLE (status/irq)
+	 */
+	switch (status) {
+	case -ECONNRESET:
+	case -ESHUTDOWN:
+		/* connection gone */
+		atomic_set(&rndis->notify_count, 0);
+		break;
+	default:
+		pr_info("RNDIS %s response error %d, %d/%d\n",
+			ep->name, status,
+			req->actual, req->length);
+		/* FALLTHROUGH */
+	case 0:
+		if (ep != rndis->notify)
+			break;
+
+		/* handle multiple pending RNDIS_RESPONSE_AVAILABLE
+		 * notifications by resending until we're done
+		 */
+		if (atomic_dec_and_test(&rndis->notify_count))
+			break;
+		status = usb_ep_queue(rndis->notify, req, GFP_ATOMIC);
+		if (status) {
+			atomic_dec(&rndis->notify_count);
+			pr_info("notify/1 --> %d\n", status);
+		}
+		break;
+	}
+}
+
+static void rndis_qc_command_complete(struct usb_ep *ep,
+							struct usb_request *req)
+{
+	struct f_rndis_qc			*rndis = req->context;
+	int				status;
+
+	/* received RNDIS command from USB_CDC_SEND_ENCAPSULATED_COMMAND */
+	status = rndis_msg_parser(rndis->config, (u8 *) req->buf);
+	if (status < 0)
+		pr_err("RNDIS command error %d, %d/%d\n",
+			status, req->actual, req->length);
+}
+
+static int
+rndis_qc_setup(struct usb_function *f, const struct usb_ctrlrequest *ctrl)
+{
+	struct f_rndis_qc		*rndis = func_to_rndis_qc(f);
+	struct usb_composite_dev *cdev = f->config->cdev;
+	struct usb_request	*req = cdev->req;
+	int			value = -EOPNOTSUPP;
+	u16			w_index = le16_to_cpu(ctrl->wIndex);
+	u16			w_value = le16_to_cpu(ctrl->wValue);
+	u16			w_length = le16_to_cpu(ctrl->wLength);
+
+	/* composite driver infrastructure handles everything except
+	 * CDC class messages; interface activation uses set_alt().
+	 */
+	switch ((ctrl->bRequestType << 8) | ctrl->bRequest) {
+
+	/* RNDIS uses the CDC command encapsulation mechanism to implement
+	 * an RPC scheme, with much getting/setting of attributes by OID.
+	 */
+	case ((USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8)
+			| USB_CDC_SEND_ENCAPSULATED_COMMAND:
+		if (w_value || w_index != rndis->ctrl_id)
+			goto invalid;
+		/* read the request; process it later */
+		value = w_length;
+		req->complete = rndis_qc_command_complete;
+		req->context = rndis;
+		/* later, rndis_response_available() sends a notification */
+		break;
+
+	case ((USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8)
+			| USB_CDC_GET_ENCAPSULATED_RESPONSE:
+		if (w_value || w_index != rndis->ctrl_id)
+			goto invalid;
+		else {
+			u8 *buf;
+			u32 n;
+
+			/* return the result */
+			buf = rndis_get_next_response(rndis->config, &n);
+			if (buf) {
+				memcpy(req->buf, buf, n);
+				req->complete = rndis_qc_response_complete;
+				rndis_free_response(rndis->config, buf);
+				value = n;
+			}
+			/* else stalls ... spec says to avoid that */
+		}
+		break;
+
+	default:
+invalid:
+		VDBG(cdev, "invalid control req%02x.%02x v%04x i%04x l%d\n",
+			ctrl->bRequestType, ctrl->bRequest,
+			w_value, w_index, w_length);
+	}
+
+	/* respond with data transfer or status phase? */
+	if (value >= 0) {
+		DBG(cdev, "rndis req%02x.%02x v%04x i%04x l%d\n",
+			ctrl->bRequestType, ctrl->bRequest,
+			w_value, w_index, w_length);
+		req->zero = (value < w_length);
+		req->length = value;
+		value = usb_ep_queue(cdev->gadget->ep0, req, GFP_ATOMIC);
+		if (value < 0)
+			pr_err("rndis response on err %d\n", value);
+	}
+
+	/* device either stalls (value < 0) or reports success */
+	return value;
+}
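+
+/* Editor's sketch of the control-plane round trip handled above (not part of
+ * the original change):
+ *
+ *	host: SEND_ENCAPSULATED_COMMAND (ep0 OUT)  -> rndis_qc_command_complete()
+ *	                                              -> rndis_msg_parser()
+ *	device: RESPONSE_AVAILABLE (interrupt IN)  <- rndis_qc_response_available()
+ *	host: GET_ENCAPSULATED_RESPONSE (ep0 IN)   -> rndis_get_next_response()
+ *
+ * Every RNDIS OID get/set travels over this single encapsulated-command
+ * channel; the bulk endpoints only carry Ethernet frames.
+ */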
+
+
+static int rndis_qc_set_alt(struct usb_function *f, unsigned intf, unsigned alt)
+{
+	struct f_rndis_qc		*rndis = func_to_rndis_qc(f);
+	struct usb_composite_dev *cdev = f->config->cdev;
+
+	/* we know alt == 0 */
+
+	if (intf == rndis->ctrl_id) {
+		if (rndis->notify->driver_data) {
+			VDBG(cdev, "reset rndis control %d\n", intf);
+			usb_ep_disable(rndis->notify);
+		}
+		if (!rndis->notify->desc) {
+			VDBG(cdev, "init rndis ctrl %d\n", intf);
+			if (config_ep_by_speed(cdev->gadget, f, rndis->notify))
+				goto fail;
+		}
+		usb_ep_enable(rndis->notify);
+		rndis->notify->driver_data = rndis;
+
+	} else if (intf == rndis->data_id) {
+		struct net_device	*net;
+
+		if (rndis->port.in_ep->driver_data) {
+			DBG(cdev, "reset rndis\n");
+			gether_qc_disconnect(&rndis->port);
+			rndis_qc_bam_disconnect(rndis);
+		}
+
+		if (!rndis->port.in_ep->desc || !rndis->port.out_ep->desc) {
+			DBG(cdev, "init rndis\n");
+			if (config_ep_by_speed(cdev->gadget, f,
+					       rndis->port.in_ep) ||
+			    config_ep_by_speed(cdev->gadget, f,
+					       rndis->port.out_ep)) {
+				rndis->port.in_ep->desc = NULL;
+				rndis->port.out_ep->desc = NULL;
+				goto fail;
+			}
+		}
+
+		/* Avoid ZLPs; they can be troublesome. */
+		rndis->port.is_zlp_ok = false;
+
+		/* RNDIS should be in the "RNDIS uninitialized" state,
+		 * either never activated or after rndis_uninit().
+		 *
+		 * We don't want data to flow here until a nonzero packet
+		 * filter is set, at which point it enters "RNDIS data
+		 * initialized" state ... but we do want the endpoints
+		 * to be activated.  It's a strange little state.
+		 *
+		 * REVISIT the RNDIS gadget code has done this wrong for a
+		 * very long time.  We need another call to the link layer
+		 * code -- gether_updown(...bool) maybe -- to do it right.
+		 */
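+		/*
+		 * Editor's sketch (not from the original change): the filter is
+		 * set later from the host side, roughly:
+		 *
+		 *	REMOTE_NDIS_SET_MSG { Oid = OID_GEN_CURRENT_PACKET_FILTER }
+		 *		-> rndis_msg_parser()
+		 *		-> *rndis->port.cdc_filter becomes nonzero
+		 *
+		 * at which point the gadget RNDIS core starts passing frames.
+		 */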
+		rndis->port.cdc_filter = 0;
+
+		DBG(cdev, "RNDIS RX/TX early activation ...\n");
+		net = gether_qc_connect(&rndis->port);
+		if (IS_ERR(net))
+			return PTR_ERR(net);
+
+		if (rndis_qc_bam_connect(rndis))
+			goto fail;
+
+		rndis_set_param_dev(rndis->config, net,
+				&rndis->port.cdc_filter);
+	} else
+		goto fail;
+
+	return 0;
+fail:
+	return -EINVAL;
+}
+
+static void rndis_qc_disable(struct usb_function *f)
+{
+	struct f_rndis_qc		*rndis = func_to_rndis_qc(f);
+
+	if (!rndis->notify->driver_data)
+		return;
+
+	pr_info("rndis deactivated\n");
+
+	rndis_uninit(rndis->config);
+	gether_qc_disconnect(&rndis->port);
+	rndis_qc_bam_disconnect(rndis);
+
+	usb_ep_disable(rndis->notify);
+	rndis->notify->driver_data = NULL;
+}
+
+/*-------------------------------------------------------------------------*/
+
+/*
+ * This isn't quite the same mechanism as CDC Ethernet, since the
+ * notification scheme passes less data, but the same set of link
+ * states must be tested.  A key difference is that altsettings are
+ * not used to tell whether the link should send packets or not.
+ */
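+
+/* Editor's sketch (not from the original change): link state is reported to
+ * the host purely through RNDIS indications on the notify path, e.g.
+ *
+ *	rndis_qc_open()  -> rndis_signal_connect(config)
+ *	                    (INDICATE_STATUS_MSG, MEDIA_CONNECT)
+ *	rndis_qc_close() -> rndis_signal_disconnect(config)
+ *	                    (INDICATE_STATUS_MSG, MEDIA_DISCONNECT)
+ *
+ * rather than through altsetting changes as CDC Ethernet does.
+ */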
+
+static void rndis_qc_open(struct qc_gether *geth)
+{
+	struct f_rndis_qc		*rndis = func_to_rndis_qc(&geth->func);
+	struct usb_composite_dev *cdev = geth->func.config->cdev;
+
+	DBG(cdev, "%s\n", __func__);
+
+	rndis_set_param_medium(rndis->config, NDIS_MEDIUM_802_3,
+				rndis_qc_bitrate(cdev->gadget) / 100);
+	rndis_signal_connect(rndis->config);
+}
+
+static void rndis_qc_close(struct qc_gether *geth)
+{
+	struct f_rndis_qc		*rndis = func_to_rndis_qc(&geth->func);
+
+	DBG(geth->func.config->cdev, "%s\n", __func__);
+
+	rndis_set_param_medium(rndis->config, NDIS_MEDIUM_802_3, 0);
+	rndis_signal_disconnect(rndis->config);
+}
+
+/*-------------------------------------------------------------------------*/
+
+/* ethernet function driver setup/binding */
+
+static int
+rndis_qc_bind(struct usb_configuration *c, struct usb_function *f)
+{
+	struct usb_composite_dev *cdev = c->cdev;
+	struct f_rndis_qc		*rndis = func_to_rndis_qc(f);
+	int			status;
+	struct usb_ep		*ep;
+
+	/* allocate instance-specific interface IDs */
+	status = usb_interface_id(c, f);
+	if (status < 0)
+		goto fail;
+	rndis->ctrl_id = status;
+	rndis_qc_iad_descriptor.bFirstInterface = status;
+
+	rndis_qc_control_intf.bInterfaceNumber = status;
+	rndis_qc_union_desc.bMasterInterface0 = status;
+
+	status = usb_interface_id(c, f);
+	if (status < 0)
+		goto fail;
+	rndis->data_id = status;
+
+	rndis_qc_data_intf.bInterfaceNumber = status;
+	rndis_qc_union_desc.bSlaveInterface0 = status;
+
+	status = -ENODEV;
+
+	/* allocate instance-specific endpoints */
+	ep = usb_ep_autoconfig(cdev->gadget, &rndis_qc_fs_in_desc);
+	if (!ep)
+		goto fail;
+	rndis->port.in_ep = ep;
+	ep->driver_data = cdev;	/* claim */
+
+	ep = usb_ep_autoconfig(cdev->gadget, &rndis_qc_fs_out_desc);
+	if (!ep)
+		goto fail;
+	rndis->port.out_ep = ep;
+	ep->driver_data = cdev;	/* claim */
+
+	/* NOTE:  a status/notification endpoint is, strictly speaking,
+	 * optional.  We don't treat it that way though!  It's simpler,
+	 * and some newer profiles don't treat it as optional.
+	 */
+	ep = usb_ep_autoconfig(cdev->gadget, &rndis_qc_fs_notify_desc);
+	if (!ep)
+		goto fail;
+	rndis->notify = ep;
+	ep->driver_data = cdev;	/* claim */
+
+	status = -ENOMEM;
+
+	/* allocate notification request and buffer */
+	rndis->notify_req = usb_ep_alloc_request(ep, GFP_KERNEL);
+	if (!rndis->notify_req)
+		goto fail;
+	rndis->notify_req->buf = kmalloc(RNDIS_QC_STATUS_BYTECOUNT, GFP_KERNEL);
+	if (!rndis->notify_req->buf)
+		goto fail;
+	rndis->notify_req->length = RNDIS_QC_STATUS_BYTECOUNT;
+	rndis->notify_req->context = rndis;
+	rndis->notify_req->complete = rndis_qc_response_complete;
+
+	/* copy descriptors, and track endpoint copies */
+	f->descriptors = usb_copy_descriptors(eth_qc_fs_function);
+	if (!f->descriptors)
+		goto fail;
+
+	/* support all relevant hardware speeds... we expect that when
+	 * hardware is dual speed, all bulk-capable endpoints work at
+	 * both speeds
+	 */
+	if (gadget_is_dualspeed(c->cdev->gadget)) {
+		rndis_qc_hs_in_desc.bEndpointAddress =
+				rndis_qc_fs_in_desc.bEndpointAddress;
+		rndis_qc_hs_out_desc.bEndpointAddress =
+				rndis_qc_fs_out_desc.bEndpointAddress;
+		rndis_qc_hs_notify_desc.bEndpointAddress =
+				rndis_qc_fs_notify_desc.bEndpointAddress;
+
+		/* copy descriptors, and track endpoint copies */
+		f->hs_descriptors = usb_copy_descriptors(eth_qc_hs_function);
+
+		if (!f->hs_descriptors)
+			goto fail;
+	}
+
+	if (gadget_is_superspeed(c->cdev->gadget)) {
+		rndis_qc_ss_in_desc.bEndpointAddress =
+				rndis_qc_fs_in_desc.bEndpointAddress;
+		rndis_qc_ss_out_desc.bEndpointAddress =
+				rndis_qc_fs_out_desc.bEndpointAddress;
+		rndis_qc_ss_notify_desc.bEndpointAddress =
+				rndis_qc_fs_notify_desc.bEndpointAddress;
+
+		/* copy descriptors, and track endpoint copies */
+		f->ss_descriptors = usb_copy_descriptors(eth_qc_ss_function);
+		if (!f->ss_descriptors)
+			goto fail;
+	}
+
+	rndis->port.open = rndis_qc_open;
+	rndis->port.close = rndis_qc_close;
+
+	status = rndis_register(rndis_qc_response_available, rndis);
+	if (status < 0)
+		goto fail;
+	rndis->config = status;
+
+	rndis_set_param_medium(rndis->config, NDIS_MEDIUM_802_3, 0);
+	rndis_set_host_mac(rndis->config, rndis->ethaddr);
+
+	if (rndis_set_param_vendor(rndis->config, rndis->vendorID,
+				   rndis->manufacturer))
+			goto fail;
+
+	rndis_set_max_pkt_xfer(rndis->config, rndis->max_pkt_per_xfer);
+
+	/* NOTE:  all that is done without knowing or caring about
+	 * the network link ... which is unavailable to this code
+	 * until we're activated via set_alt().
+	 */
+
+	DBG(cdev, "RNDIS: %s speed IN/%s OUT/%s NOTIFY/%s\n",
+			gadget_is_superspeed(c->cdev->gadget) ? "super" :
+			gadget_is_dualspeed(c->cdev->gadget) ? "dual" : "full",
+			rndis->port.in_ep->name, rndis->port.out_ep->name,
+			rndis->notify->name);
+	return 0;
+
+fail:
+	if (gadget_is_superspeed(c->cdev->gadget) && f->ss_descriptors)
+		usb_free_descriptors(f->ss_descriptors);
+	if (gadget_is_dualspeed(c->cdev->gadget) && f->hs_descriptors)
+		usb_free_descriptors(f->hs_descriptors);
+	if (f->descriptors)
+		usb_free_descriptors(f->descriptors);
+
+	if (rndis->notify_req) {
+		kfree(rndis->notify_req->buf);
+		usb_ep_free_request(rndis->notify, rndis->notify_req);
+	}
+
+	/* we might as well release our claims on endpoints */
+	if (rndis->notify)
+		rndis->notify->driver_data = NULL;
+	if (rndis->port.out_ep->desc)
+		rndis->port.out_ep->driver_data = NULL;
+	if (rndis->port.in_ep->desc)
+		rndis->port.in_ep->driver_data = NULL;
+
+	pr_err("%s: can't bind, err %d\n", f->name, status);
+
+	return status;
+}
+
+static void
+rndis_qc_unbind(struct usb_configuration *c, struct usb_function *f)
+{
+	struct f_rndis_qc		*rndis = func_to_rndis_qc(f);
+
+	rndis_deregister(rndis->config);
+	rndis_exit();
+
+	if (gadget_is_superspeed(c->cdev->gadget))
+		usb_free_descriptors(f->ss_descriptors);
+	if (gadget_is_dualspeed(c->cdev->gadget))
+		usb_free_descriptors(f->hs_descriptors);
+	usb_free_descriptors(f->descriptors);
+
+	kfree(rndis->notify_req->buf);
+	usb_ep_free_request(rndis->notify, rndis->notify_req);
+
+	kfree(rndis);
+}
+
+/* Some controllers can't support RNDIS ... */
+static inline bool can_support_rndis_qc(struct usb_configuration *c)
+{
+	/* everything else is *presumably* fine */
+	return true;
+}
+
+/**
+ * rndis_qc_bind_config - add RNDIS network link to a configuration
+ * @c: the configuration to support the network link
+ * @ethaddr: a buffer in which the ethernet address of the host side
+ *	of the link was recorded
+ * Context: single threaded during gadget setup
+ *
+ * Returns zero on success, else negative errno.
+ *
+ * Caller must have called @gether_setup().  Caller is also responsible
+ * for calling @gether_cleanup() before module unload.
+ */
+int
+rndis_qc_bind_config(struct usb_configuration *c, u8 ethaddr[ETH_ALEN])
+{
+	return rndis_qc_bind_config_vendor(c, ethaddr, 0, NULL, 1);
+}
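+
+/* Editor's usage sketch (assumption, not from the original change): a
+ * composite driver would typically wire this up roughly as
+ *
+ *	static int my_config_bind(struct usb_configuration *c)
+ *	{
+ *		// hostaddr filled in by the gether setup call named in the
+ *		// kernel-doc above
+ *		return rndis_qc_bind_config(c, hostaddr);
+ *	}
+ *
+ * my_config_bind() and hostaddr are hypothetical names for illustration.
+ */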
+
+int
+rndis_qc_bind_config_vendor(struct usb_configuration *c, u8 ethaddr[ETH_ALEN],
+					 u32 vendorID, const char *manufacturer,
+					 u8 max_pkt_per_xfer)
+{
+	struct f_rndis_qc	*rndis;
+	int		status;
+
+	if (!can_support_rndis_qc(c) || !ethaddr)
+		return -EINVAL;
+
+	/* setup RNDIS itself */
+	status = rndis_init();
+	if (status < 0)
+		return status;
+
+	status = rndis_qc_bam_setup();
+	if (status) {
+		pr_err("bam setup failed");
+		return status;
+	}
+
+	/* maybe allocate device-global string IDs */
+	if (rndis_qc_string_defs[0].id == 0) {
+
+		/* control interface label */
+		status = usb_string_id(c->cdev);
+		if (status < 0)
+			return status;
+		rndis_qc_string_defs[0].id = status;
+		rndis_qc_control_intf.iInterface = status;
+
+		/* data interface label */
+		status = usb_string_id(c->cdev);
+		if (status < 0)
+			return status;
+		rndis_qc_string_defs[1].id = status;
+		rndis_qc_data_intf.iInterface = status;
+
+		/* IAD iFunction label */
+		status = usb_string_id(c->cdev);
+		if (status < 0)
+			return status;
+		rndis_qc_string_defs[2].id = status;
+		rndis_qc_iad_descriptor.iFunction = status;
+	}
+
+	/* allocate and initialize one new instance */
+	status = -ENOMEM;
+	rndis = kzalloc(sizeof *rndis, GFP_KERNEL);
+	if (!rndis)
+		goto fail;
+
+	memcpy(rndis->ethaddr, ethaddr, ETH_ALEN);
+	rndis->vendorID = vendorID;
+	rndis->manufacturer = manufacturer;
+
+	/* if max_pkt_per_xfer was not configured set to default value */
+	rndis->max_pkt_per_xfer =
+		max_pkt_per_xfer ? max_pkt_per_xfer : DEFAULT_MAX_PKT_PER_XFER;
+
+	/* RNDIS activates when the host changes this filter */
+	rndis->port.cdc_filter = 0;
+
+	/* RNDIS has special (and complex) framing */
+	rndis->port.header_len = sizeof(struct rndis_packet_msg_type);
+	rndis->port.wrap = rndis_qc_add_header;
+	rndis->port.unwrap = rndis_qc_rm_hdr;
+
+	rndis->port.func.name = "rndis";
+	rndis->port.func.strings = rndis_qc_strings;
+	/* descriptors are per-instance copies */
+	rndis->port.func.bind = rndis_qc_bind;
+	rndis->port.func.unbind = rndis_qc_unbind;
+	rndis->port.func.set_alt = rndis_qc_set_alt;
+	rndis->port.func.setup = rndis_qc_setup;
+	rndis->port.func.disable = rndis_qc_disable;
+
+	_rndis_qc = rndis;
+
+	status = usb_add_function(c, &rndis->port.func);
+	if (status) {
+		kfree(rndis);
+fail:
+		rndis_exit();
+	}
+	return status;
+}
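+
+/* Editor's note (worked example, not from the original change): the
+ * max_pkt_per_xfer value chosen above is advertised to the host in the RNDIS
+ * INITIALIZE response (see the rndis.c change in this patch):
+ *
+ *	MaxPacketsPerTransfer = max_pkt_per_xfer
+ *	MaxTransferSize       = max_pkt_per_xfer *
+ *				(MTU + ethhdr + RNDIS data header + 22)
+ *
+ * e.g. with max_pkt_per_xfer = 3 and a 1500-byte MTU this is roughly
+ * 3 * (1500 + 14 + 44 + 22) = 4740 bytes per aggregated transfer.
+ */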
+
+static int rndis_qc_open_dev(struct inode *ip, struct file *fp)
+{
+	pr_info("Open rndis QC driver\n");
+
+	if (!_rndis_qc) {
+		pr_err("rndis_qc_dev not created yet\n");
+		return -ENODEV;
+	}
+
+	if (rndis_qc_lock(&_rndis_qc->open_excl)) {
+		pr_err("Already opened\n");
+		return -EBUSY;
+	}
+
+	fp->private_data = _rndis_qc;
+	pr_info("rndis QC file opened\n");
+
+	return 0;
+}
+
+static int rndis_qc_release_dev(struct inode *ip, struct file *fp)
+{
+	struct f_rndis_qc	*rndis = fp->private_data;
+
+	pr_info("Close rndis QC file");
+	rndis_qc_unlock(&rndis->open_excl);
+
+	return 0;
+}
+
+static long rndis_qc_ioctl(struct file *fp, unsigned cmd, unsigned long arg)
+{
+	struct f_rndis_qc	*rndis = fp->private_data;
+	int ret = 0;
+
+	pr_info("Received command %d", cmd);
+
+	if (rndis_qc_lock(&rndis->ioctl_excl))
+		return -EBUSY;
+
+	switch (cmd) {
+	case RNDIS_QC_GET_MAX_PKT_PER_XFER:
+		ret = copy_to_user((void __user *)arg,
+					&rndis->max_pkt_per_xfer,
+					sizeof(rndis->max_pkt_per_xfer));
+		if (ret) {
+			pr_err("copying to user space failed");
+			ret = -EFAULT;
+		}
+		pr_info("Sent max packets per xfer %d",
+				rndis->max_pkt_per_xfer);
+		break;
+	default:
+		pr_err("Unsupported IOCTL");
+		ret = -EINVAL;
+	}
+
+	rndis_qc_unlock(&rndis->ioctl_excl);
+
+	return ret;
+}
+
+static const struct file_operations rndis_qc_fops = {
+	.owner = THIS_MODULE,
+	.open = rndis_qc_open_dev,
+	.release = rndis_qc_release_dev,
+	.unlocked_ioctl	= rndis_qc_ioctl,
+};
+
+static struct miscdevice rndis_qc_device = {
+	.minor = MISC_DYNAMIC_MINOR,
+	.name = "android_rndis_qc",
+	.fops = &rndis_qc_fops,
+};
+
+static int rndis_qc_init(void)
+{
+	int ret;
+
+	pr_info("initialize rndis QC instance\n");
+
+	ret = misc_register(&rndis_qc_device);
+	if (ret)
+		pr_err("rndis QC driver failed to register");
+
+	return ret;
+}
+
+static void rndis_qc_cleanup(void)
+{
+	pr_info("rndis QC cleanup");
+
+	misc_deregister(&rndis_qc_device);
+	_rndis_qc = NULL;
+}
+
+
diff --git a/drivers/usb/gadget/f_serial.c b/drivers/usb/gadget/f_serial.c
index 7b6acc6..3d6ceaa 100644
--- a/drivers/usb/gadget/f_serial.c
+++ b/drivers/usb/gadget/f_serial.c
@@ -572,6 +572,7 @@
 #ifdef CONFIG_MODEM_SUPPORT
 	usb_ep_fifo_flush(gser->notify);
 	usb_ep_disable(gser->notify);
+	gser->notify->driver_data = NULL;
 #endif
 	gser->online = 0;
 }
diff --git a/drivers/usb/gadget/rndis.c b/drivers/usb/gadget/rndis.c
index 16c4afb..e0520c7 100644
--- a/drivers/usb/gadget/rndis.c
+++ b/drivers/usb/gadget/rndis.c
@@ -585,8 +585,8 @@
 	resp->MinorVersion = cpu_to_le32(RNDIS_MINOR_VERSION);
 	resp->DeviceFlags = cpu_to_le32(RNDIS_DF_CONNECTIONLESS);
 	resp->Medium = cpu_to_le32(RNDIS_MEDIUM_802_3);
-	resp->MaxPacketsPerTransfer = cpu_to_le32(TX_SKB_HOLD_THRESHOLD);
-	resp->MaxTransferSize = cpu_to_le32(TX_SKB_HOLD_THRESHOLD *
+	resp->MaxPacketsPerTransfer = cpu_to_le32(params->max_pkt_per_xfer);
+	resp->MaxTransferSize = cpu_to_le32(params->max_pkt_per_xfer *
 		(params->dev->mtu
 		+ sizeof(struct ethhdr)
 		+ sizeof(struct rndis_packet_msg_type)
@@ -902,6 +902,8 @@
 			rndis_per_dev_params[i].used = 1;
 			rndis_per_dev_params[i].resp_avail = resp_avail;
 			rndis_per_dev_params[i].v = v;
+			rndis_per_dev_params[i].max_pkt_per_xfer =
+							TX_SKB_HOLD_THRESHOLD;
 			pr_debug("%s: configNr = %d\n", __func__, i);
 			return i;
 		}
@@ -955,6 +957,13 @@
 	return 0;
 }
 
+void rndis_set_max_pkt_xfer(u8 configNr, u8 max_pkt_per_xfer)
+{
+	pr_debug("%s:\n", __func__);
+
+	rndis_per_dev_params[configNr].max_pkt_per_xfer = max_pkt_per_xfer;
+}
+
 void rndis_add_hdr(struct sk_buff *skb)
 {
 	struct rndis_packet_msg_type *header;
diff --git a/drivers/usb/gadget/rndis.h b/drivers/usb/gadget/rndis.h
index 907c330..1f06c42 100644
--- a/drivers/usb/gadget/rndis.h
+++ b/drivers/usb/gadget/rndis.h
@@ -235,6 +235,7 @@
 	struct net_device	*dev;
 
 	u32			vendorID;
+	u8			max_pkt_per_xfer;
 	const char		*vendorDescr;
 	void			(*resp_avail)(void *v);
 	void			*v;
diff --git a/drivers/usb/gadget/u_bam.c b/drivers/usb/gadget/u_bam.c
index 1fade88..a9e5d91 100644
--- a/drivers/usb/gadget/u_bam.c
+++ b/drivers/usb/gadget/u_bam.c
@@ -98,8 +98,8 @@
 	struct usb_request	*rx_req;
 	struct usb_request	*tx_req;
 
-	u8					src_pipe_idx;
-	u8					dst_pipe_idx;
+	u32					src_pipe_idx;
+	u32					dst_pipe_idx;
 	u8					connection_idx;
 
 	/* stats */
diff --git a/drivers/usb/gadget/u_bam_data.c b/drivers/usb/gadget/u_bam_data.c
index 73b4e75..a105f5d 100644
--- a/drivers/usb/gadget/u_bam_data.c
+++ b/drivers/usb/gadget/u_bam_data.c
@@ -50,8 +50,8 @@
 	struct usb_request	*rx_req;
 	struct usb_request	*tx_req;
 
-	u8			src_pipe_idx;
-	u8			dst_pipe_idx;
+	u32			src_pipe_idx;
+	u32			dst_pipe_idx;
 	u8			connection_idx;
 };
 
diff --git a/drivers/usb/gadget/u_ctrl_hsuart.c b/drivers/usb/gadget/u_ctrl_hsuart.c
index 7102d81..a55960e 100644
--- a/drivers/usb/gadget/u_ctrl_hsuart.c
+++ b/drivers/usb/gadget/u_ctrl_hsuart.c
@@ -289,7 +289,7 @@
 
 void ghsuart_ctrl_disconnect(void *gptr, int port_num)
 {
-	struct gctrl_port	*port;
+	struct ghsuart_ctrl_port	*port;
 	struct grmnet		*gr = NULL;
 	unsigned long		flags;
 
@@ -300,7 +300,7 @@
 		return;
 	}
 
-	port = gctrl_ports[port_num].port;
+	port = ghsuart_ctrl_ports[port_num].port;
 
 	if (!gptr || !port) {
 		pr_err("%s: grmnet port is null\n", __func__);
@@ -372,7 +372,7 @@
 static void ghsuart_ctrl_port_free(int portno)
 {
 	struct ghsuart_ctrl_port	*port = ghsuart_ctrl_ports[portno].port;
-	struct platform_driver	*pdrv = &gctrl_ports[portno].pdrv;
+	struct platform_driver	*pdrv = &ghsuart_ctrl_ports[portno].pdrv;
 
 	destroy_workqueue(port->wq);
 	if (pdrv)
diff --git a/drivers/usb/gadget/u_data_hsuart.c b/drivers/usb/gadget/u_data_hsuart.c
index 91b1190..74bb93f 100644
--- a/drivers/usb/gadget/u_data_hsuart.c
+++ b/drivers/usb/gadget/u_data_hsuart.c
@@ -843,12 +843,15 @@
 	ghsuart_data_free_buffers(port);
 
 	/* disable endpoints */
-	if (port->in)
+	if (port->in) {
 		usb_ep_disable(port->in);
+		port->in->driver_data = NULL;
+	}
 
-	if (port->out)
+	if (port->out) {
 		usb_ep_disable(port->out);
-
+		port->out->driver_data = NULL;
+	}
 	atomic_set(&port->connected, 0);
 
 	if (port->gtype == USB_GADGET_SERIAL) {
diff --git a/drivers/usb/gadget/u_sdio.c b/drivers/usb/gadget/u_sdio.c
index 5e9b0ec..a604e1e 100644
--- a/drivers/usb/gadget/u_sdio.c
+++ b/drivers/usb/gadget/u_sdio.c
@@ -990,8 +990,10 @@
 
 	/* disable endpoints, aborting down any active I/O */
 	usb_ep_disable(gser->out);
+	gser->out->driver_data = NULL;
 
 	usb_ep_disable(gser->in);
+	gser->in->driver_data = NULL;
 
 	spin_lock_irqsave(&port->port_lock, flags);
 	gsdio_free_requests(gser->out, &port->read_pool);
diff --git a/drivers/usb/gadget/u_smd.c b/drivers/usb/gadget/u_smd.c
index a5ceaff..ce285a3 100644
--- a/drivers/usb/gadget/u_smd.c
+++ b/drivers/usb/gadget/u_smd.c
@@ -712,7 +712,9 @@
 
 	/* disable endpoints, aborting down any active I/O */
 	usb_ep_disable(gser->out);
+	gser->out->driver_data = NULL;
 	usb_ep_disable(gser->in);
+	gser->in->driver_data = NULL;
 
 	spin_lock_irqsave(&port->port_lock, flags);
 	gsmd_free_requests(gser->out, &port->read_pool);
diff --git a/drivers/usb/host/ehci-msm2.c b/drivers/usb/host/ehci-msm2.c
index 8a87a6a..c612cb9 100644
--- a/drivers/usb/host/ehci-msm2.c
+++ b/drivers/usb/host/ehci-msm2.c
@@ -54,6 +54,10 @@
 	bool					async_int;
 	bool					vbus_on;
 	atomic_t				in_lpm;
+	int					pmic_gpio_dp_irq;
+	bool					pmic_gpio_dp_irq_enabled;
+	uint32_t				pmic_gpio_int_cnt;
+	atomic_t				pm_usage_cnt;
 	struct wake_lock			wlock;
 };
 
@@ -603,6 +607,11 @@
 
 	atomic_set(&mhcd->in_lpm, 1);
 	enable_irq(hcd->irq);
+	if (mhcd->pmic_gpio_dp_irq) {
+		mhcd->pmic_gpio_dp_irq_enabled = 1;
+		enable_irq_wake(mhcd->pmic_gpio_dp_irq);
+		enable_irq(mhcd->pmic_gpio_dp_irq);
+	}
 	wake_unlock(&mhcd->wlock);
 
 	dev_info(mhcd->dev, "EHCI USB in low power mode\n");
@@ -622,6 +631,11 @@
 		return 0;
 	}
 
+	if (mhcd->pmic_gpio_dp_irq_enabled) {
+		disable_irq_wake(mhcd->pmic_gpio_dp_irq);
+		disable_irq_nosync(mhcd->pmic_gpio_dp_irq);
+		mhcd->pmic_gpio_dp_irq_enabled = 0;
+	}
 	wake_lock(&mhcd->wlock);
 
 	/* Vote for TCXO when waking up the phy */
@@ -669,6 +683,11 @@
 		enable_irq(hcd->irq);
 	}
 
+	if (atomic_read(&mhcd->pm_usage_cnt)) {
+		atomic_set(&mhcd->pm_usage_cnt, 0);
+		pm_runtime_put_noidle(mhcd->dev);
+	}
+
 	dev_info(mhcd->dev, "EHCI USB exited from low power mode\n");
 
 	return 0;
@@ -689,6 +708,32 @@
 	return ehci_irq(hcd);
 }
 
+static irqreturn_t msm_ehci_host_wakeup_irq(int irq, void *data)
+{
+
+	struct msm_hcd *mhcd = data;
+
+	mhcd->pmic_gpio_int_cnt++;
+	dev_dbg(mhcd->dev, "%s: hsusb host remote wakeup interrupt cnt: %u\n",
+			__func__, mhcd->pmic_gpio_int_cnt);
+
+
+	wake_lock(&mhcd->wlock);
+
+	if (mhcd->pmic_gpio_dp_irq_enabled) {
+		mhcd->pmic_gpio_dp_irq_enabled = 0;
+		disable_irq_wake(irq);
+		disable_irq_nosync(irq);
+	}
+
+	if (!atomic_read(&mhcd->pm_usage_cnt)) {
+		atomic_set(&mhcd->pm_usage_cnt, 1);
+		pm_runtime_get(mhcd->dev);
+	}
+
+	return IRQ_HANDLED;
+}
+
 static int msm_ehci_reset(struct usb_hcd *hcd)
 {
 	struct ehci_hcd *ehci = hcd_to_ehci(hcd);
@@ -952,6 +997,22 @@
 	 * hence, runtime framework automatically calls this driver's
 	 * runtime APIs based on root-hub's state.
 	 */
+	/* configure pmic_gpio_irq for D+ change */
+	if (pdata && pdata->pmic_gpio_dp_irq)
+		mhcd->pmic_gpio_dp_irq = pdata->pmic_gpio_dp_irq;
+	if (mhcd->pmic_gpio_dp_irq) {
+		ret = request_threaded_irq(mhcd->pmic_gpio_dp_irq, NULL,
+				msm_ehci_host_wakeup_irq,
+				IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
+				"msm_ehci_host_wakeup", mhcd);
+		if (!ret) {
+			disable_irq_nosync(mhcd->pmic_gpio_dp_irq);
+		} else {
+			dev_err(&pdev->dev, "request_irq(%d) failed: %d\n",
+					mhcd->pmic_gpio_dp_irq, ret);
+			mhcd->pmic_gpio_dp_irq = 0;
+		}
+	}
 	pm_runtime_set_active(&pdev->dev);
 	pm_runtime_enable(&pdev->dev);
 
@@ -984,6 +1045,11 @@
 	struct usb_hcd *hcd = platform_get_drvdata(pdev);
 	struct msm_hcd *mhcd = hcd_to_mhcd(hcd);
 
+	if (mhcd->pmic_gpio_dp_irq) {
+		if (mhcd->pmic_gpio_dp_irq_enabled)
+			disable_irq_wake(mhcd->pmic_gpio_dp_irq);
+		free_irq(mhcd->pmic_gpio_dp_irq, mhcd);
+	}
 	device_init_wakeup(&pdev->dev, 0);
 	pm_runtime_disable(&pdev->dev);
 	pm_runtime_set_suspended(&pdev->dev);
diff --git a/drivers/usb/misc/Kconfig b/drivers/usb/misc/Kconfig
index 13828e0..3aa2e5c 100644
--- a/drivers/usb/misc/Kconfig
+++ b/drivers/usb/misc/Kconfig
@@ -293,3 +293,13 @@
 	  driver for dial up network and RMNET.
 	  To compile this driver as a module, choose M here: the module
 	  will be called mdm_bridge. If unsure, choose N.
+
+config USB_QCOM_KS_BRIDGE
+	tristate "USB Qualcomm kick start bridge"
+	depends on USB
+	help
+	  Say Y here if you have a Qualcomm modem device connected via USB that
+	  will be bridged in kernel space. This driver works as a bridge to pass
+	  boot images, ram-dumps and EFS sync data.
+	  To compile this driver as a module, choose M here: the module
+	  will be called ks_bridge. If unsure, choose N.
diff --git a/drivers/usb/misc/Makefile b/drivers/usb/misc/Makefile
index b4aee65..447e4d2 100644
--- a/drivers/usb/misc/Makefile
+++ b/drivers/usb/misc/Makefile
@@ -33,3 +33,4 @@
 obj-$(CONFIG_USB_QCOM_DIAG_BRIDGE_TEST)	+= diag_bridge_test.o
 mdm_bridge-y				:= mdm_ctrl_bridge.o mdm_data_bridge.o
 obj-$(CONFIG_USB_QCOM_MDM_BRIDGE) 	+= mdm_bridge.o
+obj-$(CONFIG_USB_QCOM_KS_BRIDGE)	+= ks_bridge.o
diff --git a/drivers/usb/misc/ks_bridge.c b/drivers/usb/misc/ks_bridge.c
new file mode 100644
index 0000000..10cbe59
--- /dev/null
+++ b/drivers/usb/misc/ks_bridge.c
@@ -0,0 +1,805 @@
+/*
+ * Copyright (c) 2012, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+/* add additional information to our printk's */
+#define pr_fmt(fmt) "%s: " fmt "\n", __func__
+
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/kref.h>
+#include <linux/platform_device.h>
+#include <linux/ratelimit.h>
+#include <linux/uaccess.h>
+#include <linux/usb.h>
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+#include <linux/miscdevice.h>
+#include <linux/list.h>
+#include <linux/wait.h>
+
+#define DRIVER_DESC	"USB host ks bridge driver"
+#define DRIVER_VERSION	"1.0"
+
+struct data_pkt {
+	int			n_read;
+	char			*buf;
+	size_t			len;
+	struct list_head	list;
+	void			*ctxt;
+};
+
+#define FILE_OPENED		BIT(0)
+#define USB_DEV_CONNECTED	BIT(1)
+#define NO_RX_REQS		10
+#define NO_BRIDGE_INSTANCES	2
+#define BOOT_BRIDGE_INDEX	0
+#define EFS_BRIDGE_INDEX	1
+#define MAX_DATA_PKT_SIZE	16384
+
+struct ks_bridge {
+	char			*name;
+	spinlock_t		lock;
+	struct workqueue_struct	*wq;
+	struct work_struct	to_mdm_work;
+	struct work_struct	start_rx_work;
+	struct list_head	to_mdm_list;
+	struct list_head	to_ks_list;
+	wait_queue_head_t	ks_wait_q;
+
+	/* usb specific */
+	struct usb_device	*udev;
+	struct usb_interface	*ifc;
+	__u8			in_epAddr;
+	__u8			out_epAddr;
+	unsigned int		in_pipe;
+	unsigned int		out_pipe;
+	struct usb_anchor	submitted;
+
+	unsigned long		flags;
+	unsigned int		alloced_read_pkts;
+
+#define DBG_MSG_LEN   40
+#define DBG_MAX_MSG   500
+	unsigned int	dbg_idx;
+	rwlock_t	dbg_lock;
+	char     (dbgbuf[DBG_MAX_MSG])[DBG_MSG_LEN];   /* buffer */
+};
+struct ks_bridge *__ksb[NO_BRIDGE_INSTANCES];
+
+/* by default debugging is enabled */
+static unsigned int enable_dbg = 1;
+module_param(enable_dbg, uint, S_IRUGO | S_IWUSR);
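+/*
+ * Editor's note (usage sketch, not from the original change): with the
+ * S_IRUGO | S_IWUSR permissions above the flag can be flipped at run time,
+ * e.g.
+ *
+ *	echo 0 > /sys/module/ks_bridge/parameters/enable_dbg
+ *
+ * assuming the driver is built as the ks_bridge module.
+ */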
+
+static void
+dbg_log_event(struct ks_bridge *ksb, char *event, int d1, int d2)
+{
+	unsigned long flags;
+	unsigned long long t;
+	unsigned long microsec;
+
+	if (!enable_dbg)
+		return;
+
+	write_lock_irqsave(&ksb->dbg_lock, flags);
+	t = cpu_clock(smp_processor_id());
+	microsec = do_div(t, 1000000000) / 1000;
+	scnprintf(ksb->dbgbuf[ksb->dbg_idx], DBG_MSG_LEN, "%5lu.%06lu:%s:%x:%x",
+			(unsigned long)t, microsec, event, d1, d2);
+
+	ksb->dbg_idx++;
+	ksb->dbg_idx = ksb->dbg_idx % DBG_MAX_MSG;
+	write_unlock_irqrestore(&ksb->dbg_lock, flags);
+}
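+
+/* Editor's note (usage sketch, not from the original change): the ring buffer
+ * filled in above is dumped through the debugfs file created in ksb_init(),
+ * e.g. (assuming debugfs is mounted at the usual place):
+ *
+ *	cat /sys/kernel/debug/ks_bridge/ks_bridge:1
+ */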
+
+static
+struct data_pkt *ksb_alloc_data_pkt(size_t count, gfp_t flags, void *ctxt)
+{
+	struct data_pkt *pkt;
+
+	pkt = kzalloc(sizeof(struct data_pkt), flags);
+	if (!pkt) {
+		pr_err("failed to allocate data packet\n");
+		return ERR_PTR(-ENOMEM);
+	}
+
+	pkt->buf = kmalloc(count, flags);
+	if (!pkt->buf) {
+		pr_err("failed to allocate data buffer\n");
+		kfree(pkt);
+		return ERR_PTR(-ENOMEM);
+	}
+
+	pkt->len = count;
+	INIT_LIST_HEAD(&pkt->list);
+	pkt->ctxt = ctxt;
+
+	return pkt;
+}
+
+static void ksb_free_data_pkt(struct data_pkt *pkt)
+{
+	kfree(pkt->buf);
+	kfree(pkt);
+}
+
+
+static ssize_t ksb_fs_read(struct file *fp, char __user *buf,
+				size_t count, loff_t *pos)
+{
+	int ret;
+	unsigned long flags;
+	struct ks_bridge *ksb = fp->private_data;
+	struct data_pkt *pkt;
+	size_t space, copied;
+
+read_start:
+	if (!test_bit(USB_DEV_CONNECTED, &ksb->flags))
+		return -ENODEV;
+
+	spin_lock_irqsave(&ksb->lock, flags);
+	if (list_empty(&ksb->to_ks_list)) {
+		spin_unlock_irqrestore(&ksb->lock, flags);
+		ret = wait_event_interruptible(ksb->ks_wait_q,
+				!list_empty(&ksb->to_ks_list) ||
+				!test_bit(USB_DEV_CONNECTED, &ksb->flags));
+		if (ret < 0)
+			return ret;
+
+		goto read_start;
+	}
+
+	space = count;
+	copied = 0;
+	while (!list_empty(&ksb->to_ks_list) && space) {
+		size_t len;
+
+		pkt = list_first_entry(&ksb->to_ks_list, struct data_pkt, list);
+		len = min_t(size_t, space, pkt->len - pkt->n_read);
+		spin_unlock_irqrestore(&ksb->lock, flags);
+
+		/* copy_to_user() returns the number of bytes left uncopied */
+		ret = copy_to_user(buf + copied, pkt->buf + pkt->n_read, len);
+		if (ret) {
+			pr_err("copy_to_user failed err:%d\n", ret);
+			spin_lock_irqsave(&ksb->lock, flags);
+			list_del_init(&pkt->list);
+			spin_unlock_irqrestore(&ksb->lock, flags);
+			ksb_free_data_pkt(pkt);
+			ksb->alloced_read_pkts--;
+			return -EFAULT;
+		}
+
+		pkt->n_read += len;
+		space -= len;
+		copied += len;
+
+		spin_lock_irqsave(&ksb->lock, flags);
+		if (pkt->n_read == pkt->len) {
+			list_del_init(&pkt->list);
+			ksb_free_data_pkt(pkt);
+			ksb->alloced_read_pkts--;
+		}
+	}
+	spin_unlock_irqrestore(&ksb->lock, flags);
+
+	dbg_log_event(ksb, "KS_READ", copied, 0);
+
+	pr_debug("count:%zu space:%zu copied:%zu", count, space, copied);
+
+	return copied;
+}
+
+static void ksb_tx_cb(struct urb *urb)
+{
+	struct data_pkt *pkt = urb->context;
+	struct ks_bridge *ksb = pkt->ctxt;
+
+	dbg_log_event(ksb, "C TX_URB", urb->status, 0);
+	pr_debug("status:%d", urb->status);
+
+	if (ksb->ifc)
+		usb_autopm_put_interface_async(ksb->ifc);
+
+	if (urb->status < 0)
+		pr_err_ratelimited("urb failed with err:%d", urb->status);
+
+	ksb_free_data_pkt(pkt);
+}
+
+static void ksb_tomdm_work(struct work_struct *w)
+{
+	struct ks_bridge *ksb = container_of(w, struct ks_bridge, to_mdm_work);
+	struct data_pkt	*pkt;
+	unsigned long flags;
+	struct urb *urb;
+	int ret;
+
+	spin_lock_irqsave(&ksb->lock, flags);
+	while (!list_empty(&ksb->to_mdm_list)
+			&& test_bit(USB_DEV_CONNECTED, &ksb->flags)) {
+		pkt = list_first_entry(&ksb->to_mdm_list,
+				struct data_pkt, list);
+		list_del_init(&pkt->list);
+		spin_unlock_irqrestore(&ksb->lock, flags);
+
+		urb = usb_alloc_urb(0, GFP_KERNEL);
+		if (!urb) {
+			pr_err_ratelimited("unable to allocate urb");
+			ksb_free_data_pkt(pkt);
+			return;
+		}
+
+		ret = usb_autopm_get_interface(ksb->ifc);
+		if (ret < 0 && ret != -EAGAIN && ret != -EACCES) {
+			pr_err_ratelimited("autopm_get failed:%d", ret);
+			usb_free_urb(urb);
+			ksb_free_data_pkt(pkt);
+			return;
+		}
+		usb_fill_bulk_urb(urb, ksb->udev, ksb->out_pipe,
+				pkt->buf, pkt->len, ksb_tx_cb, pkt);
+		usb_anchor_urb(urb, &ksb->submitted);
+
+		dbg_log_event(ksb, "S TX_URB", pkt->len, 0);
+
+		ret = usb_submit_urb(urb, GFP_KERNEL);
+		if (ret) {
+			pr_err("out urb submission failed");
+			usb_unanchor_urb(urb);
+			usb_free_urb(urb);
+			ksb_free_data_pkt(pkt);
+			usb_autopm_put_interface(ksb->ifc);
+			return;
+		}
+
+		spin_lock_irqsave(&ksb->lock, flags);
+	}
+	spin_unlock_irqrestore(&ksb->lock, flags);
+}
+
+static ssize_t ksb_fs_write(struct file *fp, const char __user *buf,
+				 size_t count, loff_t *pos)
+{
+	int			ret;
+	struct data_pkt		*pkt;
+	unsigned long		flags;
+	struct ks_bridge	*ksb = fp->private_data;
+
+	pkt = ksb_alloc_data_pkt(count, GFP_KERNEL, ksb);
+	if (IS_ERR(pkt)) {
+		pr_err("unable to allocate data packet");
+		return PTR_ERR(pkt);
+	}
+
+	ret = copy_from_user(pkt->buf, buf, count);
+	if (ret) {
+		pr_err("copy_from_user failed: err:%d", ret);
+		ksb_free_data_pkt(pkt);
+		return -EFAULT;
+	}
+
+	spin_lock_irqsave(&ksb->lock, flags);
+	list_add_tail(&pkt->list, &ksb->to_mdm_list);
+	spin_unlock_irqrestore(&ksb->lock, flags);
+
+	queue_work(ksb->wq, &ksb->to_mdm_work);
+
+	return count;
+}
+
+static int efs_fs_open(struct inode *ip, struct file *fp)
+{
+	struct ks_bridge *ksb = __ksb[EFS_BRIDGE_INDEX];
+
+	if (!ksb) {
+		pr_err("ksb is being removed");
+		return -ENODEV;
+	}
+
+	pr_debug(":%s", ksb->name);
+	dbg_log_event(ksb, "EFS-FS-OPEN", 0, 0);
+
+	fp->private_data = ksb;
+	set_bit(FILE_OPENED, &ksb->flags);
+
+	if (test_bit(USB_DEV_CONNECTED, &ksb->flags))
+		queue_work(ksb->wq, &ksb->start_rx_work);
+
+	return 0;
+}
+
+static int ksb_fs_open(struct inode *ip, struct file *fp)
+{
+	struct ks_bridge *ksb = __ksb[BOOT_BRIDGE_INDEX];
+
+	if (!ksb) {
+		pr_err("ksb is being removed");
+		return -ENODEV;
+	}
+
+	pr_debug(":%s", ksb->name);
+	dbg_log_event(ksb, "KS-FS-OPEN", 0, 0);
+
+	fp->private_data = ksb;
+	set_bit(FILE_OPENED, &ksb->flags);
+
+	if (test_bit(USB_DEV_CONNECTED, &ksb->flags))
+		queue_work(ksb->wq, &ksb->start_rx_work);
+
+	return 0;
+}
+
+static int ksb_fs_release(struct inode *ip, struct file *fp)
+{
+	struct ks_bridge	*ksb = fp->private_data;
+
+	pr_debug(":%s", ksb->name);
+	dbg_log_event(ksb, "FS-RELEASE", 0, 0);
+
+	clear_bit(FILE_OPENED, &ksb->flags);
+	fp->private_data = NULL;
+
+	return 0;
+}
+
+static const struct file_operations ksb_fops = {
+	.owner = THIS_MODULE,
+	.read = ksb_fs_read,
+	.write = ksb_fs_write,
+	.open = ksb_fs_open,
+	.release = ksb_fs_release,
+};
+
+static struct miscdevice ksb_fboot_dev = {
+	.minor = MISC_DYNAMIC_MINOR,
+	.name = "ks_bridge",
+	.fops = &ksb_fops,
+};
+
+static const struct file_operations efs_fops = {
+	.owner = THIS_MODULE,
+	.read = ksb_fs_read,
+	.write = ksb_fs_write,
+	.open = efs_fs_open,
+	.release = ksb_fs_release,
+};
+
+static struct miscdevice ksb_efs_dev = {
+	.minor = MISC_DYNAMIC_MINOR,
+	.name = "efs_bridge",
+	.fops = &efs_fops,
+};
+
+static const struct usb_device_id ksb_usb_ids[] = {
+	{ USB_DEVICE(0x5c6, 0x9008),
+	.driver_info = (unsigned long)&ksb_fboot_dev, },
+	{ USB_DEVICE(0x5c6, 0x9048),
+	.driver_info = (unsigned long)&ksb_efs_dev, },
+	{ USB_DEVICE(0x5c6, 0x904C),
+	.driver_info = (unsigned long)&ksb_efs_dev, },
+
+	{} /* terminating entry */
+};
+MODULE_DEVICE_TABLE(usb, ksb_usb_ids);
+
+static void ksb_rx_cb(struct urb *urb);
+static void submit_one_urb(struct ks_bridge *ksb)
+{
+	struct data_pkt	*pkt;
+	struct urb *urb;
+	int ret;
+
+	pkt = ksb_alloc_data_pkt(MAX_DATA_PKT_SIZE, GFP_ATOMIC, ksb);
+	if (IS_ERR(pkt)) {
+		pr_err("unable to allocate data pkt");
+		return;
+	}
+
+	urb = usb_alloc_urb(0, GFP_ATOMIC);
+	if (!urb) {
+		pr_err("unable to allocate urb");
+		ksb_free_data_pkt(pkt);
+		return;
+	}
+	ksb->alloced_read_pkts++;
+
+	usb_fill_bulk_urb(urb, ksb->udev, ksb->in_pipe,
+			pkt->buf, pkt->len,
+			ksb_rx_cb, pkt);
+	usb_anchor_urb(urb, &ksb->submitted);
+
+	dbg_log_event(ksb, "S RX_URB", pkt->len, 0);
+
+	ret = usb_submit_urb(urb, GFP_ATOMIC);
+	if (ret) {
+		pr_err("in urb submission failed");
+		usb_unanchor_urb(urb);
+		usb_free_urb(urb);
+		ksb_free_data_pkt(pkt);
+		ksb->alloced_read_pkts--;
+		return;
+	}
+
+	usb_free_urb(urb);
+}
+static void ksb_rx_cb(struct urb *urb)
+{
+	struct data_pkt *pkt = urb->context;
+	struct ks_bridge *ksb = pkt->ctxt;
+
+	dbg_log_event(ksb, "C RX_URB", urb->status, urb->actual_length);
+
+	pr_debug("status:%d actual:%d", urb->status, urb->actual_length);
+
+	if (urb->status < 0) {
+		if (urb->status != -ESHUTDOWN && urb->status != -ENOENT)
+			pr_err_ratelimited("urb failed with err:%d",
+					urb->status);
+		ksb_free_data_pkt(pkt);
+		ksb->alloced_read_pkts--;
+		return;
+	}
+
+	if (urb->actual_length == 0) {
+		ksb_free_data_pkt(pkt);
+		ksb->alloced_read_pkts--;
+		goto resubmit_urb;
+	}
+
+	spin_lock(&ksb->lock);
+	pkt->len = urb->actual_length;
+	list_add_tail(&pkt->list, &ksb->to_ks_list);
+	spin_unlock(&ksb->lock);
+
+	/* wake up read thread */
+	wake_up(&ksb->ks_wait_q);
+
+resubmit_urb:
+	submit_one_urb(ksb);
+
+}
+
+static void ksb_start_rx_work(struct work_struct *w)
+{
+	struct ks_bridge *ksb =
+			container_of(w, struct ks_bridge, start_rx_work);
+	struct data_pkt	*pkt;
+	struct urb *urb;
+	int i = 0;
+	int ret;
+
+	for (i = 0; i < NO_RX_REQS; i++) {
+		pkt = ksb_alloc_data_pkt(MAX_DATA_PKT_SIZE, GFP_KERNEL, ksb);
+		if (IS_ERR(pkt)) {
+			pr_err("unable to allocate data pkt");
+			return;
+		}
+
+		urb = usb_alloc_urb(0, GFP_KERNEL);
+		if (!urb) {
+			pr_err("unable to allocate urb");
+			ksb_free_data_pkt(pkt);
+			return;
+		}
+
+		ret = usb_autopm_get_interface(ksb->ifc);
+		if (ret < 0 && ret != -EAGAIN && ret != -EACCES) {
+			pr_err_ratelimited("autopm_get failed:%d", ret);
+			usb_free_urb(urb);
+			ksb_free_data_pkt(pkt);
+			return;
+		}
+		ksb->alloced_read_pkts++;
+
+		usb_fill_bulk_urb(urb, ksb->udev, ksb->in_pipe,
+				pkt->buf, pkt->len,
+				ksb_rx_cb, pkt);
+		usb_anchor_urb(urb, &ksb->submitted);
+
+		dbg_log_event(ksb, "S RX_URB", pkt->len, 0);
+
+		ret = usb_submit_urb(urb, GFP_KERNEL);
+		if (ret) {
+			pr_err("in urb submission failed");
+			usb_unanchor_urb(urb);
+			usb_free_urb(urb);
+			ksb_free_data_pkt(pkt);
+			ksb->alloced_read_pkts--;
+			usb_autopm_put_interface(ksb->ifc);
+			return;
+		}
+
+		usb_autopm_put_interface_async(ksb->ifc);
+		usb_free_urb(urb);
+	}
+}
+
+static int
+ksb_usb_probe(struct usb_interface *ifc, const struct usb_device_id *id)
+{
+	__u8				ifc_num;
+	struct usb_host_interface	*ifc_desc;
+	struct usb_endpoint_descriptor	*ep_desc;
+	int				i;
+	struct ks_bridge		*ksb;
+	struct miscdevice		*fs_dev;
+
+	ifc_num = ifc->cur_altsetting->desc.bInterfaceNumber;
+
+	switch (id->idProduct) {
+	case 0x9008:
+		if (ifc_num != 0)
+			return -ENODEV;
+		ksb = __ksb[BOOT_BRIDGE_INDEX];
+		break;
+	case 0x9048:
+	case 0x904C:
+		if (ifc_num != 2)
+			return -ENODEV;
+		ksb = __ksb[EFS_BRIDGE_INDEX];
+		break;
+	default:
+		return -ENODEV;
+	}
+
+	if (!ksb) {
+		pr_err("ksb is not initialized");
+		return -ENODEV;
+	}
+
+	ksb->udev = usb_get_dev(interface_to_usbdev(ifc));
+	ksb->ifc = ifc;
+	ifc_desc = ifc->cur_altsetting;
+
+	for (i = 0; i < ifc_desc->desc.bNumEndpoints; i++) {
+		ep_desc = &ifc_desc->endpoint[i].desc;
+
+		if (!ksb->in_epAddr && usb_endpoint_is_bulk_in(ep_desc))
+			ksb->in_epAddr = ep_desc->bEndpointAddress;
+
+		if (!ksb->out_epAddr && usb_endpoint_is_bulk_out(ep_desc))
+			ksb->out_epAddr = ep_desc->bEndpointAddress;
+	}
+
+	if (!(ksb->in_epAddr && ksb->out_epAddr)) {
+		pr_err("could not find bulk in and bulk out endpoints");
+		usb_put_dev(ksb->udev);
+		ksb->ifc = NULL;
+		return -ENODEV;
+	}
+
+	ksb->in_pipe = usb_rcvbulkpipe(ksb->udev, ksb->in_epAddr);
+	ksb->out_pipe = usb_sndbulkpipe(ksb->udev, ksb->out_epAddr);
+
+	usb_set_intfdata(ifc, ksb);
+	set_bit(USB_DEV_CONNECTED, &ksb->flags);
+
+	dbg_log_event(ksb, "PID-ATT", id->idProduct, 0);
+
+	fs_dev = (struct miscdevice *)id->driver_info;
+	misc_register(fs_dev);
+
+	usb_enable_autosuspend(ksb->udev);
+
+	pr_debug("usb dev connected");
+
+	return 0;
+}
+
+static int ksb_usb_suspend(struct usb_interface *ifc, pm_message_t message)
+{
+	struct ks_bridge *ksb = usb_get_intfdata(ifc);
+
+	dbg_log_event(ksb, "SUSPEND", 0, 0);
+
+	pr_info("read cnt: %d", ksb->alloced_read_pkts);
+
+	usb_kill_anchored_urbs(&ksb->submitted);
+
+	return 0;
+}
+
+static int ksb_usb_resume(struct usb_interface *ifc)
+{
+	struct ks_bridge *ksb = usb_get_intfdata(ifc);
+
+	dbg_log_event(ksb, "RESUME", 0, 0);
+
+	if (test_bit(FILE_OPENED, &ksb->flags))
+		queue_work(ksb->wq, &ksb->start_rx_work);
+
+	return 0;
+}
+
+static void ksb_usb_disconnect(struct usb_interface *ifc)
+{
+	struct ks_bridge *ksb = usb_get_intfdata(ifc);
+	unsigned long flags;
+	struct data_pkt *pkt;
+
+	dbg_log_event(ksb, "PID-DETACH", 0, 0);
+
+	clear_bit(USB_DEV_CONNECTED, &ksb->flags);
+	wake_up(&ksb->ks_wait_q);
+	cancel_work_sync(&ksb->to_mdm_work);
+
+	usb_kill_anchored_urbs(&ksb->submitted);
+
+	spin_lock_irqsave(&ksb->lock, flags);
+	while (!list_empty(&ksb->to_ks_list)) {
+		pkt = list_first_entry(&ksb->to_ks_list,
+				struct data_pkt, list);
+		list_del_init(&pkt->list);
+		ksb_free_data_pkt(pkt);
+	}
+	while (!list_empty(&ksb->to_mdm_list)) {
+		pkt = list_first_entry(&ksb->to_mdm_list,
+				struct data_pkt, list);
+		list_del_init(&pkt->list);
+		ksb_free_data_pkt(pkt);
+	}
+	spin_unlock_irqrestore(&ksb->lock, flags);
+
+	usb_put_dev(ksb->udev);
+	ksb->ifc = NULL;
+	usb_set_intfdata(ifc, NULL);
+
+	return;
+}
+
+static struct usb_driver ksb_usb_driver = {
+	.name =		"ks_bridge",
+	.probe =	ksb_usb_probe,
+	.disconnect =	ksb_usb_disconnect,
+	.suspend =	ksb_usb_suspend,
+	.resume =	ksb_usb_resume,
+	.id_table =	ksb_usb_ids,
+	.supports_autosuspend = 1,
+};
+
+static int ksb_debug_show(struct seq_file *s, void *unused)
+{
+	unsigned long		flags;
+	struct ks_bridge	*ksb = s->private;
+	int			i;
+
+	read_lock_irqsave(&ksb->dbg_lock, flags);
+	for (i = 0; i < DBG_MAX_MSG; i++) {
+		if (i == (ksb->dbg_idx - 1))
+			seq_printf(s, "-->%s\n", ksb->dbgbuf[i]);
+		else
+			seq_printf(s, "%s\n", ksb->dbgbuf[i]);
+	}
+	read_unlock_irqrestore(&ksb->dbg_lock, flags);
+
+	return 0;
+}
+
+static int ksb_debug_open(struct inode *ip, struct file *fp)
+{
+	return single_open(fp, ksb_debug_show, ip->i_private);
+}
+
+static const struct file_operations dbg_fops = {
+	.open = ksb_debug_open,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.release = single_release,
+};
+static struct dentry *dbg_dir;
+static int __init ksb_init(void)
+{
+	struct ks_bridge *ksb;
+	int num_instances = 0;
+	int ret = 0;
+	int i;
+
+	dbg_dir = debugfs_create_dir("ks_bridge", NULL);
+	if (IS_ERR(dbg_dir))
+		pr_err("unable to create debug dir");
+
+	for (i = 0; i < NO_BRIDGE_INSTANCES; i++) {
+		ksb = kzalloc(sizeof(struct ks_bridge), GFP_KERNEL);
+		if (!ksb) {
+			pr_err("unable to allocate mem for ks_bridge");
+			ret = -ENOMEM;
+			goto dev_free;
+		}
+		__ksb[i] = ksb;
+
+		ksb->name = kasprintf(GFP_KERNEL, "ks_bridge:%i", i + 1);
+		if (!ksb->name) {
+			pr_info("unable to allocate name");
+			kfree(ksb);
+			ret = -ENOMEM;
+			goto dev_free;
+		}
+
+		spin_lock_init(&ksb->lock);
+		INIT_LIST_HEAD(&ksb->to_mdm_list);
+		INIT_LIST_HEAD(&ksb->to_ks_list);
+		init_waitqueue_head(&ksb->ks_wait_q);
+		ksb->wq = create_singlethread_workqueue(ksb->name);
+		if (!ksb->wq) {
+			pr_err("unable to allocate workqueue");
+			kfree(ksb->name);
+			kfree(ksb);
+			ret = -ENOMEM;
+			goto dev_free;
+		}
+
+		INIT_WORK(&ksb->to_mdm_work, ksb_tomdm_work);
+		INIT_WORK(&ksb->start_rx_work, ksb_start_rx_work);
+		init_usb_anchor(&ksb->submitted);
+
+		ksb->dbg_idx = 0;
+		rwlock_init(&ksb->dbg_lock);
+
+		if (!IS_ERR(dbg_dir))
+			debugfs_create_file(ksb->name, S_IRUGO, dbg_dir,
+					ksb, &dbg_fops);
+
+		num_instances++;
+	}
+
+	ret = usb_register(&ksb_usb_driver);
+	if (ret) {
+		pr_err("unable to register ks bridge driver");
+		goto dev_free;
+	}
+
+	pr_info("init done");
+
+	return 0;
+
+dev_free:
+	if (!IS_ERR(dbg_dir))
+		debugfs_remove_recursive(dbg_dir);
+
+	for (i = 0; i < num_instances; i++) {
+		ksb = __ksb[i];
+
+		destroy_workqueue(ksb->wq);
+		kfree(ksb->name);
+		kfree(ksb);
+	}
+
+	return ret;
+
+}
+
+static void __exit ksb_exit(void)
+{
+	struct ks_bridge *ksb;
+	int i;
+
+	if (!IS_ERR(dbg_dir))
+		debugfs_remove_recursive(dbg_dir);
+
+	usb_deregister(&ksb_usb_driver);
+
+	for (i = 0; i < NO_BRIDGE_INSTANCES; i++) {
+		ksb = __ksb[i];
+
+		destroy_workqueue(ksb->wq);
+		kfree(ksb->name);
+		kfree(ksb);
+	}
+}
+
+module_init(ksb_init);
+module_exit(ksb_exit);
+
+MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_VERSION(DRIVER_VERSION);
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/usb/serial/usb_wwan.c b/drivers/usb/serial/usb_wwan.c
index 68500a3..366df67 100644
--- a/drivers/usb/serial/usb_wwan.c
+++ b/drivers/usb/serial/usb_wwan.c
@@ -360,7 +360,7 @@
 
 	usb_mark_last_busy(port->serial->dev);
 
-	if (!status && urb->actual_length) {
+	if ((status == -ENOENT || !status) && urb->actual_length) {
 		spin_lock_irqsave(&portdata->in_lock, flags);
 		list_add_tail(&urb->urb_list, &portdata->in_urb_list);
 		spin_unlock_irqrestore(&portdata->in_lock, flags);
@@ -759,7 +759,7 @@
 		b = intfdata->in_flight;
 		spin_unlock_irq(&intfdata->susp_lock);
 
-		if (b)
+		if (b || pm_runtime_autosuspend_expiration(&serial->dev->dev))
 			return -EBUSY;
 	}
 
diff --git a/drivers/video/msm/lcdc.c b/drivers/video/msm/lcdc.c
index 863d59d..2170abe 100644
--- a/drivers/video/msm/lcdc.c
+++ b/drivers/video/msm/lcdc.c
@@ -37,6 +37,7 @@
 
 static int lcdc_off(struct platform_device *pdev);
 static int lcdc_on(struct platform_device *pdev);
+static void cont_splash_clk_ctrl(int enable);
 
 static struct platform_device *pdev_list[MSM_FB_MAX_DEV_LIST];
 static int pdev_list_cnt;
@@ -100,6 +101,8 @@
 #endif
 	mfd = platform_get_drvdata(pdev);
 
+	cont_splash_clk_ctrl(0);
+
 	if (lcdc_pdata && lcdc_pdata->lcdc_get_clk)
 		panel_pixclock_freq = lcdc_pdata->lcdc_get_clk();
 
@@ -151,6 +154,20 @@
 	return ret;
 }
 
+static void cont_splash_clk_ctrl(int enable)
+{
+	static int cont_splash_clks_enabled;
+	if (enable && !cont_splash_clks_enabled) {
+		clk_prepare_enable(pixel_mdp_clk);
+		clk_prepare_enable(pixel_lcdc_clk);
+		cont_splash_clks_enabled = 1;
+	} else if (!enable && cont_splash_clks_enabled) {
+		clk_disable_unprepare(pixel_mdp_clk);
+		clk_disable_unprepare(pixel_lcdc_clk);
+		cont_splash_clks_enabled = 0;
+	}
+}
+
 static int lcdc_probe(struct platform_device *pdev)
 {
 	struct msm_fb_data_type *mfd;
@@ -199,6 +216,8 @@
 	if (!mdp_dev)
 		return -ENOMEM;
 
+	cont_splash_clk_ctrl(1);
+
 	/*
 	 * link to the latest pdev
 	 */
diff --git a/drivers/video/msm/lcdc_truly_ips3p2335.c b/drivers/video/msm/lcdc_truly_ips3p2335.c
index a4a370e..b2f4ab8 100644
--- a/drivers/video/msm/lcdc_truly_ips3p2335.c
+++ b/drivers/video/msm/lcdc_truly_ips3p2335.c
@@ -148,6 +148,13 @@
 
 static int lcdc_truly_panel_on(struct platform_device *pdev)
 {
+	struct msm_fb_data_type *mfd = platform_get_drvdata(pdev);
+
+	if (!mfd->cont_splash_done) {
+		mfd->cont_splash_done = 1;
+		return 0;
+	}
+
 	/* Configure reset GPIO that drives DAC */
 	if (lcdc_truly_pdata->panel_config_gpio)
 		lcdc_truly_pdata->panel_config_gpio(1);
diff --git a/drivers/video/msm/mdp.c b/drivers/video/msm/mdp.c
index bfaed8d..ee73eea 100644
--- a/drivers/video/msm/mdp.c
+++ b/drivers/video/msm/mdp.c
@@ -68,6 +68,8 @@
 boolean mdp_current_clk_on = FALSE;
 boolean mdp_is_in_isr = FALSE;
 
+struct vsync vsync_cntrl;
+
 /*
  * legacy mdp_in_processing is only for DMA2-MDDI
  * this applies to DMA2 block only
@@ -95,13 +97,13 @@
 static struct delayed_work mdp_pipe_ctrl_worker;
 
 static boolean mdp_suspended = FALSE;
+ulong mdp4_display_intf;
 DEFINE_MUTEX(mdp_suspend_mutex);
 
 #ifdef CONFIG_FB_MSM_MDP40
 struct mdp_dma_data dma2_data;
 struct mdp_dma_data dma_s_data;
 struct mdp_dma_data dma_e_data;
-ulong mdp4_display_intf;
 #else
 static struct mdp_dma_data dma2_data;
 static struct mdp_dma_data dma_s_data;
@@ -1270,8 +1272,56 @@
 }
 #endif
 
-/* Returns < 0 on error, 0 on timeout, or > 0 on successful wait */
+static void send_vsync_work(struct work_struct *work)
+{
+	char buf[64];
+	char *envp[2];
 
+	snprintf(buf, sizeof(buf), "VSYNC=%llu",
+			ktime_to_ns(vsync_cntrl.vsync_time));
+	envp[0] = buf;
+	envp[1] = NULL;
+	kobject_uevent_env(&(vsync_cntrl.dev->kobj), KOBJ_CHANGE, envp);
+}
+
+void mdp3_vsync_irq_enable(int intr, int term)
+{
+	unsigned long flag;
+
+	spin_lock_irqsave(&mdp_spin_lock, flag);
+	outp32(MDP_INTR_CLEAR, intr);
+	mdp_intr_mask |= intr;
+	outp32(MDP_INTR_ENABLE, mdp_intr_mask);
+	mdp_enable_irq(term);
+	spin_unlock_irqrestore(&mdp_spin_lock, flag);
+}
+
+void mdp3_vsync_irq_disable(int intr, int term)
+{
+	unsigned long flag;
+
+	spin_lock_irqsave(&mdp_spin_lock, flag);
+	/* required to synchronize between frame update and vsync
+	 * since both use the same LCDC_FRAME_START interrupt
+	 */
+	if (intr == LCDC_FRAME_START && dma2_data.waiting == FALSE) {
+		mdp_intr_mask &= ~intr;
+		outp32(MDP_INTR_ENABLE, mdp_intr_mask);
+	}
+	mdp_disable_irq(term);
+	spin_unlock_irqrestore(&mdp_spin_lock, flag);
+}
+
+#ifdef CONFIG_FB_MSM_MDP303
+/* vsync_isr_handler: Called from isr context*/
+static void vsync_isr_handler(void)
+{
+	vsync_cntrl.vsync_time = ktime_get();
+	schedule_work(&(vsync_cntrl.vsync_work));
+}
+#endif
+
+/* Returns < 0 on error, 0 on timeout, or > 0 on successful wait */
 int mdp_ppp_pipe_wait(void)
 {
 	int ret = 1;
@@ -1432,11 +1482,9 @@
 		outpdw(MDP_BASE + 0x0004, 0);
 	} else if (term == MDP_OVERLAY1_TERM) {
 		mdp_pipe_ctrl(MDP_OVERLAY1_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
-		mdp_lut_enable();
 		outpdw(MDP_BASE + 0x0008, 0);
 	} else if (term == MDP_OVERLAY2_TERM) {
 		mdp_pipe_ctrl(MDP_OVERLAY2_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
-		mdp_lut_enable();
 		outpdw(MDP_BASE + 0x00D0, 0);
 	}
 #else
@@ -1731,6 +1779,10 @@
 	if (!mdp_interrupt)
 		goto out;
 
+	/*Primary Vsync interrupt*/
+	if (mdp_interrupt & MDP_PRIM_RDPTR)
+		vsync_isr_handler();
+
 	/* DMA3 TV-Out Start */
 	if (mdp_interrupt & TV_OUT_DMA3_START) {
 		/* let's disable TV out interrupt */
@@ -1778,12 +1830,19 @@
 			dma = &dma2_data;
 			spin_lock_irqsave(&mdp_spin_lock, flag);
 			/* let's disable LCDC interrupt */
-			mdp_intr_mask &= ~LCDC_FRAME_START;
-			outp32(MDP_INTR_ENABLE, mdp_intr_mask);
 			if (dma->waiting) {
 				dma->waiting = FALSE;
 				complete(&dma->comp);
 			}
+
+			if (vsync_cntrl.vsync_irq_enabled)
+				vsync_isr_handler();
+
+			if (!vsync_cntrl.vsync_irq_enabled && !(dma->waiting)) {
+				mdp_intr_mask &= ~LCDC_FRAME_START;
+				outp32(MDP_INTR_ENABLE, mdp_intr_mask);
+			}
+
 			spin_unlock_irqrestore(&mdp_spin_lock, flag);
 		}
 
@@ -1914,7 +1973,7 @@
 	for (i = 0; i < MDP_MAX_BLOCK; i++) {
 		atomic_set(&mdp_block_power_cnt[i], 0);
 	}
-
+	INIT_WORK(&(vsync_cntrl.vsync_work), send_vsync_work);
 #ifdef MSM_FB_ENABLE_DBGFS
 	{
 		struct dentry *root;
@@ -2018,35 +2077,53 @@
 	return ret;
 }
 
+#ifdef CONFIG_FB_MSM_MDP303
+unsigned is_mdp4_hw_reset(void)
+{
+	return 0;
+}
+void mdp4_hw_init(void)
+{
+	/* empty */
+}
+#endif
+
 static int mdp_on(struct platform_device *pdev)
 {
 	int ret = 0;
-#ifdef CONFIG_FB_MSM_MDP40
 	struct msm_fb_data_type *mfd;
-
 	mfd = platform_get_drvdata(pdev);
-	mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
-	mdp_clk_ctrl(1);
-	mdp4_hw_init();
-	outpdw(MDP_BASE + 0x0038, mdp4_display_intf);
-	if (mfd->panel.type == MIPI_CMD_PANEL) {
-		mdp_vsync_cfg_regs(mfd, FALSE);
-		mdp4_dsi_cmd_on(pdev);
-	} else if (mfd->panel.type == MIPI_VIDEO_PANEL)
-		mdp4_dsi_video_on(pdev);
-	else if (mfd->panel.type == HDMI_PANEL ||
-			mfd->panel.type == LCDC_PANEL ||
-			mfd->panel.type == LVDS_PANEL)
-		mdp4_lcdc_on(pdev);
 
-	mdp_clk_ctrl(0);
-	mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_OFF, FALSE);
-#endif
+	if (mdp_rev >= MDP_REV_40) {
+		mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
+		mdp_clk_ctrl(1);
+		mdp4_hw_init();
+		outpdw(MDP_BASE + 0x0038, mdp4_display_intf);
+		if (mfd->panel.type == MIPI_CMD_PANEL) {
+			mdp_vsync_cfg_regs(mfd, FALSE);
+			mdp4_dsi_cmd_on(pdev);
+		} else if (mfd->panel.type == MIPI_VIDEO_PANEL) {
+			mdp4_dsi_video_on(pdev);
+		} else if (mfd->panel.type == HDMI_PANEL ||
+				mfd->panel.type == LCDC_PANEL ||
+				mfd->panel.type == LVDS_PANEL) {
+			mdp4_lcdc_on(pdev);
+		}
+
+		mdp_clk_ctrl(0);
+		mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_OFF, FALSE);
+	}
+
+	if ((mdp_rev == MDP_REV_303) &&
+			(mfd->panel.type == MIPI_CMD_PANEL))
+		vsync_cntrl.dev = mfd->fbi->dev;
 
 	mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
+
 	ret = panel_next_on(pdev);
 	mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_OFF, FALSE);
 
+
 	mdp_histogram_ctrl_all(TRUE);
 
 	return ret;
@@ -2321,7 +2398,8 @@
 		if (!(mdp_pdata->cont_splash_enabled))
 			mdp4_hw_init();
 #else
-		mdp_hw_init();
+		if (!(mdp_pdata->cont_splash_enabled))
+			mdp_hw_init();
 #endif
 
 #ifdef CONFIG_FB_MSM_OVERLAY
@@ -2359,8 +2437,10 @@
 		if (mdp_pdata->cont_splash_enabled) {
 			mfd->cont_splash_done = 0;
 			if (!contSplash_update_done) {
-				mdp_pipe_ctrl(MDP_CMD_BLOCK,
-					MDP_BLOCK_POWER_ON, FALSE);
+				if (mfd->panel.type == MIPI_VIDEO_PANEL ||
+				    mfd->panel.type == LCDC_PANEL)
+					mdp_pipe_ctrl(MDP_CMD_BLOCK,
+						MDP_BLOCK_POWER_ON, FALSE);
 				contSplash_update_done = 1;
 			}
 		} else
@@ -2505,6 +2585,7 @@
 		mfd->do_histogram = mdp_do_histogram;
 		mfd->start_histogram = mdp_histogram_start;
 		mfd->stop_histogram = mdp_histogram_stop;
+		mfd->vsync_ctrl = mdp_dma_video_vsync_ctrl;
 		if (mfd->panel_info.pdest == DISPLAY_1)
 			mfd->dma = &dma2_data;
 		else {
@@ -2551,6 +2632,7 @@
 		mfd->do_histogram = mdp_do_histogram;
 		mfd->start_histogram = mdp_histogram_start;
 		mfd->stop_histogram = mdp_histogram_stop;
+		mfd->vsync_ctrl = mdp_dma_vsync_ctrl;
 		if (mfd->panel_info.pdest == DISPLAY_1)
 			mfd->dma = &dma2_data;
 		else {
@@ -2618,6 +2700,7 @@
 		}
 #else
 		mfd->dma = &dma2_data;
+		mfd->vsync_ctrl = mdp_dma_lcdc_vsync_ctrl;
 		spin_lock_irqsave(&mdp_spin_lock, flag);
 		mdp_intr_mask &= ~MDP_DMA_P_DONE;
 		outp32(MDP_INTR_ENABLE, mdp_intr_mask);
@@ -2670,11 +2753,12 @@
 		mdp_clk_ctrl(0);
 		goto mdp_probe_err;
 	}
-#ifdef CONFIG_FB_MSM_MDP40
-	mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
-	mdp4_display_intf = inpdw(MDP_BASE + 0x0038);
-	mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_OFF, FALSE);
-#endif
+
+	if (mdp_rev >= MDP_REV_40) {
+		mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
+		mdp4_display_intf = inpdw(MDP_BASE + 0x0038);
+		mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_OFF, FALSE);
+	}
 
 	mdp_clk_ctrl(0);
 
diff --git a/drivers/video/msm/mdp.h b/drivers/video/msm/mdp.h
index 2411dca..12bd1d4 100644
--- a/drivers/video/msm/mdp.h
+++ b/drivers/video/msm/mdp.h
@@ -44,9 +44,10 @@
 extern int mdp_rev;
 extern int mdp_iommu_split_domain;
 extern struct mdp_csc_cfg mdp_csc_convert[4];
-
 extern struct workqueue_struct *mdp_hist_wq;
 
+extern uint32 mdp_intr_mask;
+
 #define MDP4_REVISION_V1		0
 #define MDP4_REVISION_V2		1
 #define MDP4_REVISION_V2_1	2
@@ -90,6 +91,15 @@
 extern unsigned char hdmi_prim_display;
 extern unsigned char hdmi_prim_resolution;
 
+struct vsync {
+	ktime_t vsync_time;
+	struct device *dev;
+	struct work_struct vsync_work;
+	int vsync_irq_enabled;
+};
+
+extern struct vsync vsync_cntrl;
+
 /*
  * MDP Image Structure
  */
@@ -287,6 +297,7 @@
 #define MDP_HISTOGRAM_TERM_DMA_S 0x20000
 #define MDP_HISTOGRAM_TERM_VG_1 0x40000
 #define MDP_HISTOGRAM_TERM_VG_2 0x80000
+#define MDP_VSYNC_TERM 0x1000
 
 #define ACTIVE_START_X_EN BIT(31)
 #define ACTIVE_START_Y_EN BIT(31)
@@ -306,6 +317,7 @@
 #define MDP_PPP_DONE 				BIT(0)
 #define TV_OUT_DMA3_DONE    BIT(6)
 #define TV_ENC_UNDERRUN     BIT(7)
+#define MDP_PRIM_RDPTR      BIT(8)
 #define TV_OUT_DMA3_START   BIT(13)
 #define MDP_HIST_DONE       BIT(20)
 
@@ -812,6 +824,11 @@
 	return 0;
 }
 #endif
+void mdp_dma_vsync_ctrl(int enable);
+void mdp_dma_video_vsync_ctrl(int enable);
+void mdp_dma_lcdc_vsync_ctrl(int enable);
+void mdp3_vsync_irq_enable(int intr, int term);
+void mdp3_vsync_irq_disable(int intr, int term);
 
 #ifdef MDP_HW_VSYNC
 void vsync_clk_enable(void);
@@ -862,6 +879,11 @@
 {
 	/* empty */
 }
+static inline int msmfb_overlay_vsync_ctrl(struct fb_info *info,
+						void __user *argp)
+{
+	return 0;
+}
 #endif
 
 int mdp_ppp_v4l2_overlay_set(struct fb_info *info, struct mdp_overlay *req);
diff --git a/drivers/video/msm/mdp4_overlay.c b/drivers/video/msm/mdp4_overlay.c
index 7da011f..5879530 100644
--- a/drivers/video/msm/mdp4_overlay.c
+++ b/drivers/video/msm/mdp4_overlay.c
@@ -1817,7 +1817,13 @@
 		op_mode &= ~(MDP4_OP_FLIP_UD + MDP4_OP_SCALEY_EN);
 		outpdw(base + 0x0058, op_mode);
 		outpdw(base + 0x1008, 0);	/* black */
+		/*
+		 * Set src size and dst size same to avoid underruns
+		 */
+		outpdw(base + 0x0000, inpdw(base + 0x0008));
 	} else {
+		u32 src_size = ((pipe->src_h << 16) | pipe->src_w);
+		outpdw(base + 0x0000, src_size);
 		format &= ~MDP4_FORMAT_SOLID_FILL;
 		blend->solidfill_pipe = NULL;
 	}
@@ -2922,30 +2928,32 @@
 
 int mdp4_overlay_wait4vsync(struct fb_info *info, long long *vtime)
 {
-	if (info->node == 0) {
+	if (!hdmi_prim_display && info->node == 0) {
 		if (ctrl->panel_mode & MDP4_PANEL_DSI_VIDEO)
 			mdp4_dsi_video_wait4vsync(0, vtime);
 		else if (ctrl->panel_mode & MDP4_PANEL_DSI_CMD)
 			mdp4_dsi_cmd_wait4vsync(0, vtime);
 		else if (ctrl->panel_mode & MDP4_PANEL_LCDC)
 			mdp4_lcdc_wait4vsync(0, vtime);
-	} else if (info->node == 1)
+	} else if (hdmi_prim_display || info->node == 1) {
 		mdp4_dtv_wait4vsync(0, vtime);
+	}
 
 	return 0;
 }
 
 int mdp4_overlay_vsync_ctrl(struct fb_info *info, int enable)
 {
-	if (info->node == 0) {
+	if (!hdmi_prim_display && info->node == 0) {
 		if (ctrl->panel_mode & MDP4_PANEL_DSI_VIDEO)
 			mdp4_dsi_video_vsync_ctrl(0, enable);
 		else if (ctrl->panel_mode & MDP4_PANEL_DSI_CMD)
 			mdp4_dsi_cmd_vsync_ctrl(0, enable);
 		else if (ctrl->panel_mode & MDP4_PANEL_LCDC)
 			mdp4_lcdc_vsync_ctrl(0, enable);
-	} else if (info->node == 1)
+	} else if (hdmi_prim_display || info->node == 1) {
 		mdp4_dtv_vsync_ctrl(0, enable);
+	}
 
 	return 0;
 }
@@ -3291,13 +3299,6 @@
 	},
 };
 
-static int mdp_iommu_fault_handler(struct iommu_domain *domain,
-	struct device *dev, unsigned long iova, int flags)
-{
-	pr_err("MDP IOMMU page fault: iova 0x%lx", iova);
-	return 0;
-}
-
 void mdp4_iommu_attach(void)
 {
 	static int done;
@@ -3328,8 +3329,6 @@
 			if (!domain)
 				continue;
 
-			iommu_set_fault_handler(domain,
-				mdp_iommu_fault_handler);
 			if (iommu_attach_device(domain,	ctx)) {
 				WARN(1, "%s: could not attach domain %d to context %s."
 					" iommu programming will not occur.\n",
diff --git a/drivers/video/msm/mdp4_overlay_dsi_video.c b/drivers/video/msm/mdp4_overlay_dsi_video.c
index 398b1e6..6445ec1 100644
--- a/drivers/video/msm/mdp4_overlay_dsi_video.c
+++ b/drivers/video/msm/mdp4_overlay_dsi_video.c
@@ -455,6 +455,7 @@
 
 	vctrl->mfd = mfd;
 	vctrl->dev = mfd->fbi->dev;
+	vctrl->fake_vsync = 1;
 
 	/* mdp clock on */
 	mdp_clk_ctrl(1);
@@ -513,6 +514,8 @@
 	pipe->src_w = fbi->var.xres;
 	pipe->src_y = 0;
 	pipe->src_x = 0;
+	pipe->dst_h = fbi->var.yres;
+	pipe->dst_w = fbi->var.xres;
 	pipe->srcp0_ystride = fbi->fix.line_length;
 	pipe->bpp = bpp;
 
diff --git a/drivers/video/msm/mdp4_overlay_dtv.c b/drivers/video/msm/mdp4_overlay_dtv.c
index 57a07d0..f3d9e2c 100644
--- a/drivers/video/msm/mdp4_overlay_dtv.c
+++ b/drivers/video/msm/mdp4_overlay_dtv.c
@@ -73,6 +73,7 @@
 	int dmae_wait_cnt;
 	int wait_vsync_cnt;
 	int blt_change;
+	int fake_vsync;
 	struct mutex update_lock;
 	struct completion dmae_comp;
 	struct completion vsync_comp;
@@ -236,6 +237,11 @@
 
 	vctrl = &vsync_ctrl_db[cndx];
 
+	if (vctrl->fake_vsync) {
+		vctrl->fake_vsync = 0;
+		schedule_work(&vctrl->vsync_work);
+	}
+
 	if (vctrl->vsync_irq_enabled == enable)
 		return;
 
@@ -521,6 +527,7 @@
 		return -EINVAL;
 
 	vctrl->dev = mfd->fbi->dev;
+	vctrl->fake_vsync = 1;
 
 	mdp_footswitch_ctrl(TRUE);
 	/* Mdp clock enable */
@@ -587,6 +594,7 @@
 
 	ret = panel_next_off(pdev);
 	mdp_footswitch_ctrl(FALSE);
+	vctrl->fake_vsync = 1;
 
 	/* Mdp clock disable */
 	mdp_clk_ctrl(0);
@@ -700,6 +708,8 @@
 	pipe->src_w = fbi->var.xres;
 	pipe->src_y = 0;
 	pipe->src_x = 0;
+	pipe->dst_h = fbi->var.yres;
+	pipe->dst_w = fbi->var.xres;
 	pipe->srcp0_ystride = fbi->fix.line_length;
 
 	ret = mdp4_overlay_format2pipe(pipe);
diff --git a/drivers/video/msm/mdp4_overlay_lcdc.c b/drivers/video/msm/mdp4_overlay_lcdc.c
index 2da2052..79bb7c5 100644
--- a/drivers/video/msm/mdp4_overlay_lcdc.c
+++ b/drivers/video/msm/mdp4_overlay_lcdc.c
@@ -460,6 +460,7 @@
 
 	vctrl->mfd = mfd;
 	vctrl->dev = mfd->fbi->dev;
+	vctrl->fake_vsync = 1;
 
 	/* mdp clock on */
 	mdp_clk_ctrl(1);
@@ -503,6 +504,8 @@
 	pipe->src_w = fbi->var.xres;
 	pipe->src_y = 0;
 	pipe->src_x = 0;
+	pipe->dst_h = fbi->var.yres;
+	pipe->dst_w = fbi->var.xres;
 
 	if (mfd->display_iova)
 		pipe->srcp0_addr = mfd->display_iova + buf_offset;
diff --git a/drivers/video/msm/mdp_dma.c b/drivers/video/msm/mdp_dma.c
index 3a7513a..a506648 100644
--- a/drivers/video/msm/mdp_dma.c
+++ b/drivers/video/msm/mdp_dma.c
@@ -512,6 +512,23 @@
 	up(&mfd->dma->mutex);
 }
 
+void mdp_dma_vsync_ctrl(int enable)
+{
+	if (vsync_cntrl.vsync_irq_enabled == enable)
+		return;
+
+	vsync_cntrl.vsync_irq_enabled = enable;
+
+	if (enable) {
+		mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
+		MDP_OUTP(MDP_BASE + 0x021c, 0x10); /* read pointer */
+		mdp3_vsync_irq_enable(MDP_PRIM_RDPTR, MDP_VSYNC_TERM);
+	} else {
+		mdp3_vsync_irq_disable(MDP_PRIM_RDPTR, MDP_VSYNC_TERM);
+		mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_OFF, FALSE);
+	}
+}
+
 void mdp_lcd_update_workqueue_handler(struct work_struct *work)
 {
 	struct msm_fb_data_type *mfd = NULL;
diff --git a/drivers/video/msm/mdp_dma_dsi_video.c b/drivers/video/msm/mdp_dma_dsi_video.c
index 1ba5b8d..d94896f 100644
--- a/drivers/video/msm/mdp_dma_dsi_video.c
+++ b/drivers/video/msm/mdp_dma_dsi_video.c
@@ -26,6 +26,7 @@
 #include "mdp.h"
 #include "msm_fb.h"
 #include "mdp4.h"
+#include "mipi_dsi.h"
 
 #define DSI_VIDEO_BASE	0xF0000
 #define DMA_P_BASE      0x90000
@@ -86,6 +87,7 @@
 	fbi = mfd->fbi;
 	var = &fbi->var;
 
+	vsync_cntrl.dev = mfd->fbi->dev;
 	bpp = fbi->var.bits_per_pixel / 8;
 	buf = (uint8 *) fbi->fix.smem_start;
 
@@ -128,6 +130,7 @@
 	/* MDP cmd block enable */
 	mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
 
+
 	/* starting address */
 	MDP_OUTP(MDP_BASE + DMA_P_BASE + 0x8, (uint32) buf);
 
@@ -191,6 +194,13 @@
 	ctrl_polarity =	(data_en_polarity << 2) |
 		(vsync_polarity << 1) | (hsync_polarity);
 
+	if (!(mfd->cont_splash_done)) {
+		mdp_pipe_ctrl(MDP_CMD_BLOCK,
+			MDP_BLOCK_POWER_OFF, FALSE);
+		MDP_OUTP(MDP_BASE + DSI_VIDEO_BASE, 0);
+		mipi_dsi_controller_cfg(0);
+	}
+
 	MDP_OUTP(MDP_BASE + DSI_VIDEO_BASE + 0x4, hsync_ctrl);
 	MDP_OUTP(MDP_BASE + DSI_VIDEO_BASE + 0x8, vsync_period);
 	MDP_OUTP(MDP_BASE + DSI_VIDEO_BASE + 0xc, vsync_pulse_width);
@@ -237,6 +247,22 @@
 	return ret;
 }
 
+void mdp_dma_video_vsync_ctrl(int enable)
+{
+	if (vsync_cntrl.vsync_irq_enabled == enable)
+		return;
+
+	vsync_cntrl.vsync_irq_enabled = enable;
+
+	if (enable) {
+		mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
+		mdp3_vsync_irq_enable(LCDC_FRAME_START, MDP_VSYNC_TERM);
+	} else {
+		mdp3_vsync_irq_disable(LCDC_FRAME_START, MDP_VSYNC_TERM);
+		mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_OFF, FALSE);
+	}
+}
+
 void mdp_dsi_video_update(struct msm_fb_data_type *mfd)
 {
 	struct fb_info *fbi = mfd->fbi;
diff --git a/drivers/video/msm/mdp_dma_lcdc.c b/drivers/video/msm/mdp_dma_lcdc.c
index c418e9c..e030c99 100644
--- a/drivers/video/msm/mdp_dma_lcdc.c
+++ b/drivers/video/msm/mdp_dma_lcdc.c
@@ -105,6 +105,7 @@
 
 	fbi = mfd->fbi;
 	var = &fbi->var;
+	vsync_cntrl.dev = mfd->fbi->dev;
 
 	/* MDP cmd block enable */
 	mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
@@ -249,6 +250,12 @@
 	ctrl_polarity =
 	    (data_en_polarity << 2) | (vsync_polarity << 1) | (hsync_polarity);
 
+	if (!(mfd->cont_splash_done)) {
+		mdp_pipe_ctrl(MDP_CMD_BLOCK,
+			MDP_BLOCK_POWER_OFF, FALSE);
+		MDP_OUTP(MDP_BASE + timer_base, 0);
+	}
+
 	MDP_OUTP(MDP_BASE + timer_base + 0x4, hsync_ctrl);
 	MDP_OUTP(MDP_BASE + timer_base + 0x8, vsync_period);
 	MDP_OUTP(MDP_BASE + timer_base + 0xc, vsync_pulse_width * hsync_period);
@@ -321,6 +328,22 @@
 	return ret;
 }
 
+void mdp_dma_lcdc_vsync_ctrl(int enable)
+{
+	if (vsync_cntrl.vsync_irq_enabled == enable)
+		return;
+
+	vsync_cntrl.vsync_irq_enabled = enable;
+
+	if (enable) {
+		mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
+		mdp3_vsync_irq_enable(LCDC_FRAME_START, MDP_VSYNC_TERM);
+	} else {
+		mdp3_vsync_irq_disable(LCDC_FRAME_START, MDP_VSYNC_TERM);
+		mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_OFF, FALSE);
+	}
+}
+
 void mdp_lcdc_update(struct msm_fb_data_type *mfd)
 {
 	struct fb_info *fbi = mfd->fbi;
diff --git a/drivers/video/msm/mdss/Makefile b/drivers/video/msm/mdss/Makefile
index 492437e..b6294f4 100644
--- a/drivers/video/msm/mdss/Makefile
+++ b/drivers/video/msm/mdss/Makefile
@@ -7,4 +7,10 @@
 mdss-mdp-objs += mdss_mdp_wb.o
 obj-$(CONFIG_FB_MSM_MDSS) += mdss-mdp.o
 obj-$(CONFIG_FB_MSM_MDSS) += mdss_fb.o
+
+mdss-dsi-objs := mdss_dsi.o mdss_dsi_host.o
+mdss-dsi-objs += mdss_dsi_panel.o
+mdss-dsi-objs += msm_mdss_io_8974.o
+obj-$(CONFIG_FB_MSM_MDSS) += mdss-dsi.o
+
 obj-$(CONFIG_FB_MSM_MDSS_WRITEBACK) += mdss_wb.o
diff --git a/drivers/video/msm/mdss/mdss.h b/drivers/video/msm/mdss/mdss.h
index a58c3e6..6145d67 100644
--- a/drivers/video/msm/mdss/mdss.h
+++ b/drivers/video/msm/mdss/mdss.h
@@ -22,6 +22,7 @@
 #define MDSS_REG_READ(addr) readl_relaxed(mdss_reg_base + addr)
 
 extern unsigned char *mdss_reg_base;
+extern spinlock_t dsi_clk_lock;
 
 enum mdss_mdp_clk_type {
 	MDSS_CLK_AHB,
diff --git a/drivers/video/msm/mdss/mdss_dsi.c b/drivers/video/msm/mdss/mdss_dsi.c
new file mode 100644
index 0000000..d051828
--- /dev/null
+++ b/drivers/video/msm/mdss/mdss_dsi.c
@@ -0,0 +1,383 @@
+/* Copyright (c) 2012, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/spinlock.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/of_device.h>
+
+#include "mdss.h"
+#include "mdss_panel.h"
+#include "mdss_dsi.h"
+
+static struct mdss_panel_common_pdata *panel_pdata;
+
+static unsigned char *mdss_dsi_base;
+
+static int mdss_dsi_off(struct mdss_panel_data *pdata)
+{
+	int ret = 0;
+	struct mdss_panel_info *pinfo;
+
+	pinfo = &pdata->panel_info;
+
+	if (pdata->panel_info.type == MIPI_VIDEO_PANEL)
+		mdss_dsi_controller_cfg(0, pdata);
+
+	mdss_dsi_op_mode_config(DSI_CMD_MODE, pdata);
+
+	ret = panel_pdata->off(pdata);
+	if (ret) {
+		pr_err("%s: Panel OFF failed\n", __func__);
+		return ret;
+	}
+
+	spin_lock_bh(&dsi_clk_lock);
+	mdss_dsi_clk_disable();
+
+	/* disable dsi engine */
+	MIPI_OUTP(mdss_dsi_base + 0x0004, 0);
+
+	spin_unlock_bh(&dsi_clk_lock);
+
+	mdss_dsi_unprepare_clocks();
+
+	pr_debug("%s-:\n", __func__);
+
+	return ret;
+}
+
+static int mdss_dsi_on(struct mdss_panel_data *pdata)
+{
+	int ret = 0;
+	u32 clk_rate;
+	struct mdss_panel_info *pinfo;
+	struct mipi_panel_info *mipi;
+	u32 hbp, hfp, vbp, vfp, hspw, vspw, width, height;
+	u32 ystride, bpp, data;
+	u32 dummy_xres, dummy_yres;
+
+	pinfo = &pdata->panel_info;
+
+	cont_splash_clk_ctrl(0);
+	mdss_dsi_prepare_clocks();
+
+	spin_lock_bh(&dsi_clk_lock);
+
+	MIPI_OUTP(mdss_dsi_base + 0x118, 1);
+	MIPI_OUTP(mdss_dsi_base + 0x118, 0);
+
+	mdss_dsi_clk_enable();
+	spin_unlock_bh(&dsi_clk_lock);
+
+	clk_rate = pdata->panel_info.clk_rate;
+	clk_rate = min(clk_rate, pdata->panel_info.clk_max);
+
+	hbp = pdata->panel_info.lcdc.h_back_porch;
+	hfp = pdata->panel_info.lcdc.h_front_porch;
+	vbp = pdata->panel_info.lcdc.v_back_porch;
+	vfp = pdata->panel_info.lcdc.v_front_porch;
+	hspw = pdata->panel_info.lcdc.h_pulse_width;
+	vspw = pdata->panel_info.lcdc.v_pulse_width;
+	width = pdata->panel_info.xres;
+	height = pdata->panel_info.yres;
+
+	mipi  = &pdata->panel_info.mipi;
+	if (pdata->panel_info.type == MIPI_VIDEO_PANEL) {
+		dummy_xres = pdata->panel_info.lcdc.xres_pad;
+		dummy_yres = pdata->panel_info.lcdc.yres_pad;
+
+		MIPI_OUTP(mdss_dsi_base + 0x24,
+			((hspw + hbp + width + dummy_xres) << 16 |
+			(hspw + hbp)));
+		MIPI_OUTP(mdss_dsi_base + 0x28,
+			((vspw + vbp + height + dummy_yres) << 16 |
+			(vspw + vbp)));
+		MIPI_OUTP(mdss_dsi_base + 0x2C,
+			(vspw + vbp + height + dummy_yres +
+				vfp - 1) << 16 | (hspw + hbp +
+				width + dummy_xres + hfp - 1));
+
+		MIPI_OUTP(mdss_dsi_base + 0x30, (hspw << 16));
+		MIPI_OUTP(mdss_dsi_base + 0x34, 0);
+		MIPI_OUTP(mdss_dsi_base + 0x38, (vspw << 16));
+
+	} else {		/* command mode */
+		if (mipi->dst_format == DSI_CMD_DST_FORMAT_RGB888)
+			bpp = 3;
+		else if (mipi->dst_format == DSI_CMD_DST_FORMAT_RGB666)
+			bpp = 3;
+		else if (mipi->dst_format == DSI_CMD_DST_FORMAT_RGB565)
+			bpp = 2;
+		else
+			bpp = 3;	/* Default format set to RGB888 */
+
+		ystride = width * bpp + 1;
+
+		/* DSI_COMMAND_MODE_MDP_STREAM_CTRL */
+		data = (ystride << 16) | (mipi->vc << 8) | DTYPE_DCS_LWRITE;
+		MIPI_OUTP(mdss_dsi_base + 0x60, data);
+		MIPI_OUTP(mdss_dsi_base + 0x58, data);
+
+		/* DSI_COMMAND_MODE_MDP_STREAM_TOTAL */
+		data = height << 16 | width;
+		MIPI_OUTP(mdss_dsi_base + 0x64, data);
+		MIPI_OUTP(mdss_dsi_base + 0x5C, data);
+	}
+
+	mdss_dsi_host_init(mipi, pdata);
+
+	if (mipi->force_clk_lane_hs) {
+		u32 tmp;
+
+		tmp = MIPI_INP(mdss_dsi_base + 0xac);
+		tmp |= (1<<28);
+		MIPI_OUTP(mdss_dsi_base + 0xac, tmp);
+		wmb();
+	}
+
+	ret = panel_pdata->on(pdata);
+	if (ret) {
+		pr_err("%s: unable to initialize the panel\n", __func__);
+		return ret;
+	}
+
+	mdss_dsi_op_mode_config(mipi->mode, pdata);
+
+	pr_debug("%s-:\n", __func__);
+	return ret;
+}
+
+unsigned char *mdss_dsi_get_base_adr(void)
+{
+	return mdss_dsi_base;
+}
+
+unsigned char *mdss_dsi_get_clk_base(void)
+{
+	return mdss_dsi_base;
+}
+
+static int mdss_dsi_resource_initialized;
+
+static int __devinit mdss_dsi_probe(struct platform_device *pdev)
+{
+	int rc = 0;
+	pr_debug("%s\n", __func__);
+
+	if (pdev->dev.of_node && !mdss_dsi_resource_initialized) {
+		struct resource *mdss_dsi_mres;
+		pdev->id = 1;
+		mdss_dsi_mres = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+		if (!mdss_dsi_mres) {
+			pr_err("%s:%d unable to get the MDSS resources",
+				       __func__, __LINE__);
+			return -ENOMEM;
+		}
+		if (mdss_dsi_mres) {
+			mdss_dsi_base = ioremap(mdss_dsi_mres->start,
+				resource_size(mdss_dsi_mres));
+			if (!mdss_dsi_base) {
+				pr_err("%s:%d unable to remap dsi resources",
+					       __func__, __LINE__);
+				return -ENOMEM;
+			}
+		}
+
+		if (mdss_dsi_clk_init(pdev)) {
+			iounmap(mdss_dsi_base);
+			return -EPERM;
+		}
+
+		rc = of_platform_populate(pdev->dev.of_node,
+					NULL, NULL, &pdev->dev);
+		if (rc) {
+			dev_err(&pdev->dev,
+				"%s: failed to add child nodes, rc=%d\n",
+							__func__, rc);
+			iounmap(mdss_dsi_base);
+			return rc;
+		}
+
+		mdss_dsi_resource_initialized = 1;
+	}
+
+	if (!mdss_dsi_resource_initialized)
+		return -EPERM;
+
+	return 0;
+}
+
+static int __devexit mdss_dsi_remove(struct platform_device *pdev)
+{
+	struct msm_fb_data_type *mfd;
+
+	mfd = platform_get_drvdata(pdev);
+	iounmap(mdss_dsi_base);
+	return 0;
+}
+
+struct device dsi_dev;
+
+int dsi_panel_device_register(struct platform_device *pdev,
+			      struct mdss_panel_common_pdata *panel_data)
+{
+	struct mipi_panel_info *mipi;
+	int rc;
+	u8 lanes = 0, bpp;
+	u32 h_period, v_period, dsi_pclk_rate;
+	struct mdss_panel_data *pdata = NULL;
+
+	panel_pdata = panel_data;
+
+	h_period = ((panel_pdata->panel_info.lcdc.h_pulse_width)
+			+ (panel_pdata->panel_info.lcdc.h_back_porch)
+			+ (panel_pdata->panel_info.xres)
+			+ (panel_pdata->panel_info.lcdc.h_front_porch));
+
+	v_period = ((panel_pdata->panel_info.lcdc.v_pulse_width)
+			+ (panel_pdata->panel_info.lcdc.v_back_porch)
+			+ (panel_pdata->panel_info.yres)
+			+ (panel_pdata->panel_info.lcdc.v_front_porch));
+
+	mipi  = &panel_pdata->panel_info.mipi;
+
+	panel_pdata->panel_info.type =
+		((mipi->mode == DSI_VIDEO_MODE)
+			? MIPI_VIDEO_PANEL : MIPI_CMD_PANEL);
+
+	if (mipi->data_lane3)
+		lanes += 1;
+	if (mipi->data_lane2)
+		lanes += 1;
+	if (mipi->data_lane1)
+		lanes += 1;
+	if (mipi->data_lane0)
+		lanes += 1;
+
+
+	if ((mipi->dst_format == DSI_CMD_DST_FORMAT_RGB888)
+	    || (mipi->dst_format == DSI_VIDEO_DST_FORMAT_RGB888)
+	    || (mipi->dst_format == DSI_VIDEO_DST_FORMAT_RGB666_LOOSE))
+		bpp = 3;
+	else if ((mipi->dst_format == DSI_CMD_DST_FORMAT_RGB565)
+		 || (mipi->dst_format == DSI_VIDEO_DST_FORMAT_RGB565))
+		bpp = 2;
+	else
+		bpp = 3;		/* Default format set to RGB888 */
+
+	if (panel_pdata->panel_info.type == MIPI_VIDEO_PANEL &&
+		!panel_pdata->panel_info.clk_rate) {
+		h_period += panel_pdata->panel_info.lcdc.xres_pad;
+		v_period += panel_pdata->panel_info.lcdc.yres_pad;
+
+		if (lanes > 0) {
+			panel_pdata->panel_info.clk_rate =
+			((h_period * v_period * (mipi->frame_rate) * bpp * 8)
+			   / lanes);
+		} else {
+			pr_err("%s: forcing mdss_dsi lanes to 1\n", __func__);
+			panel_pdata->panel_info.clk_rate =
+				(h_period * v_period
+					 * (mipi->frame_rate) * bpp * 8);
+		}
+	}
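+
+	/*
+	 * Worked example with hypothetical panel timings: h_period = 600,
+	 * v_period = 1000, frame_rate = 60, bpp = 3 and lanes = 4 would give
+	 * clk_rate = (600 * 1000 * 60 * 3 * 8) / 4 = 216000000 (a 216 MHz
+	 * bit clock).
+	 */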
+	pll_divider_config.clk_rate = panel_pdata->panel_info.clk_rate;
+
+	rc = mdss_dsi_clk_div_config(bpp, lanes, &dsi_pclk_rate);
+	if (rc) {
+		pr_err("%s: unable to initialize the clk dividers\n", __func__);
+		return rc;
+	}
+
+	if ((dsi_pclk_rate < 3300000) || (dsi_pclk_rate > 103300000))
+		dsi_pclk_rate = 35000000;
+	mipi->dsi_pclk_rate = dsi_pclk_rate;
+
+	/*
+	 * data chain
+	 */
+	pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
+	if (!pdata)
+		return -ENOMEM;
+
+	pdata->on = mdss_dsi_on;
+	pdata->off = mdss_dsi_off;
+	memcpy(&(pdata->panel_info), &(panel_pdata->panel_info),
+	       sizeof(struct mdss_panel_info));
+
+	pdata->dsi_base = mdss_dsi_base;
+
+	/*
+	 * register in mdp driver
+	 */
+	rc = mdss_register_panel(pdata);
+	if (rc) {
+		dev_err(&pdev->dev, "unable to register MIPI DSI panel\n");
+		devm_kfree(&pdev->dev, pdata);
+		return rc;
+	}
+
+	pr_debug("%s: Panel data initialized\n", __func__);
+	return 0;
+}
+
+static const struct of_device_id msm_mdss_dsi_dt_match[] = {
+	{.compatible = "qcom,msm-mdss-dsi"},
+	{}
+};
+MODULE_DEVICE_TABLE(of, msm_mdss_dsi_dt_match);
+
+static struct platform_driver mdss_dsi_driver = {
+	.probe = mdss_dsi_probe,
+	.remove = __devexit_p(mdss_dsi_remove),
+	.shutdown = NULL,
+	.driver = {
+		.name = "mdss_dsi",
+		.of_match_table = msm_mdss_dsi_dt_match,
+	},
+};
+
+static int mdss_dsi_register_driver(void)
+{
+	return platform_driver_register(&mdss_dsi_driver);
+}
+
+static int __init mdss_dsi_driver_init(void)
+{
+	int ret;
+
+	mdss_dsi_init();
+
+	ret = mdss_dsi_register_driver();
+	if (ret) {
+		pr_err("mdss_dsi_register_driver() failed!\n");
+		return ret;
+	}
+
+	return ret;
+}
+module_init(mdss_dsi_driver_init);
+
+static void __exit mdss_dsi_driver_cleanup(void)
+{
+	iounmap(mdss_dsi_base);
+	platform_driver_unregister(&mdss_dsi_driver);
+}
+module_exit(mdss_dsi_driver_cleanup);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("DSI controller driver");
+MODULE_AUTHOR("Chandan Uddaraju <chandanu@codeaurora.org>");
diff --git a/drivers/video/msm/mdss/mdss_dsi.h b/drivers/video/msm/mdss/mdss_dsi.h
new file mode 100644
index 0000000..57fce1a
--- /dev/null
+++ b/drivers/video/msm/mdss/mdss_dsi.h
@@ -0,0 +1,294 @@
+/* Copyright (c) 2012, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef MDSS_DSI_H
+#define MDSS_DSI_H
+
+#include <linux/list.h>
+#include <mach/scm-io.h>
+
+#include "mdss_panel.h"
+
+#define MMSS_MDSS_CC_BASE_PHY 0xFD8C2300	/* mmss clock control */
+#define MMSS_SERDES_BASE_PHY 0x04f01000 /* mmss (De)Serializer CFG */
+
+#define MIPI_OUTP(addr, data) writel_relaxed((data), (addr))
+#define MIPI_INP(addr) readl_relaxed(addr)
+
+#ifdef CONFIG_MSM_SECURE_IO
+#define MIPI_OUTP_SECURE(addr, data) secure_writel((data), (addr))
+#define MIPI_INP_SECURE(addr) secure_readl(addr)
+#else
+#define MIPI_OUTP_SECURE(addr, data) writel_relaxed((data), (addr))
+#define MIPI_INP_SECURE(addr) readl_relaxed(addr)
+#endif
+
+#define MIPI_DSI_PRIM 1
+#define MIPI_DSI_SECD 2
+
+#define MIPI_DSI_PANEL_VGA	0
+#define MIPI_DSI_PANEL_WVGA	1
+#define MIPI_DSI_PANEL_WVGA_PT	2
+#define MIPI_DSI_PANEL_FWVGA_PT	3
+#define MIPI_DSI_PANEL_WSVGA_PT	4
+#define MIPI_DSI_PANEL_QHD_PT 5
+#define MIPI_DSI_PANEL_WXGA	6
+#define MIPI_DSI_PANEL_WUXGA	7
+#define MIPI_DSI_PANEL_720P_PT	8
+#define DSI_PANEL_MAX	8
+
+enum {		/* mipi dsi panel */
+	DSI_VIDEO_MODE,
+	DSI_CMD_MODE,
+};
+
+enum {
+	ST_DSI_CLK_OFF,
+	ST_DSI_SUSPEND,
+	ST_DSI_RESUME,
+	ST_DSI_PLAYING,
+	ST_DSI_NUM
+};
+
+enum {
+	EV_DSI_UPDATE,
+	EV_DSI_DONE,
+	EV_DSI_TOUT,
+	EV_DSI_NUM
+};
+
+enum {
+	LANDSCAPE = 1,
+	PORTRAIT = 2,
+};
+
+enum dsi_trigger_type {
+	DSI_CMD_MODE_DMA,
+	DSI_CMD_MODE_MDP,
+};
+
+#define DSI_NON_BURST_SYNCH_PULSE	0
+#define DSI_NON_BURST_SYNCH_EVENT	1
+#define DSI_BURST_MODE			2
+
+#define DSI_RGB_SWAP_RGB	0
+#define DSI_RGB_SWAP_RBG	1
+#define DSI_RGB_SWAP_BGR	2
+#define DSI_RGB_SWAP_BRG	3
+#define DSI_RGB_SWAP_GRB	4
+#define DSI_RGB_SWAP_GBR	5
+
+#define DSI_VIDEO_DST_FORMAT_RGB565		0
+#define DSI_VIDEO_DST_FORMAT_RGB666		1
+#define DSI_VIDEO_DST_FORMAT_RGB666_LOOSE	2
+#define DSI_VIDEO_DST_FORMAT_RGB888		3
+
+#define DSI_CMD_DST_FORMAT_RGB111	0
+#define DSI_CMD_DST_FORMAT_RGB332	3
+#define DSI_CMD_DST_FORMAT_RGB444	4
+#define DSI_CMD_DST_FORMAT_RGB565	6
+#define DSI_CMD_DST_FORMAT_RGB666	7
+#define DSI_CMD_DST_FORMAT_RGB888	8
+
+#define DSI_INTR_ERROR_MASK		BIT(25)
+#define DSI_INTR_ERROR			BIT(24)
+#define DSI_INTR_VIDEO_DONE_MASK	BIT(17)
+#define DSI_INTR_VIDEO_DONE		BIT(16)
+#define DSI_INTR_CMD_MDP_DONE_MASK	BIT(9)
+#define DSI_INTR_CMD_MDP_DONE		BIT(8)
+#define DSI_INTR_CMD_DMA_DONE_MASK	BIT(1)
+#define DSI_INTR_CMD_DMA_DONE		BIT(0)
+
+#define DSI_CMD_TRIGGER_NONE		0x0	/* mdp trigger */
+#define DSI_CMD_TRIGGER_TE		0x02
+#define DSI_CMD_TRIGGER_SW		0x04
+#define DSI_CMD_TRIGGER_SW_SEOF		0x05	/* cmd dma only */
+#define DSI_CMD_TRIGGER_SW_TE		0x06
+
+extern struct device dsi_dev;
+extern int mdss_dsi_clk_on;
+extern u32 dsi_irq;
+
+struct dsiphy_pll_divider_config {
+	u32 clk_rate;
+	u32 fb_divider;
+	u32 ref_divider_ratio;
+	u32 bit_clk_divider;	/* oCLK1 */
+	u32 byte_clk_divider;	/* oCLK2 */
+	u32 analog_posDiv;
+	u32 digital_posDiv;
+};
+
+extern struct dsiphy_pll_divider_config pll_divider_config;
+
+struct dsi_clk_mnd_table {
+	u8 lanes;
+	u8 bpp;
+	u8 pll_digital_posDiv;
+	u8 pclk_m;
+	u8 pclk_n;
+	u8 pclk_d;
+};
+
+static const struct dsi_clk_mnd_table mnd_table[] = {
+	{ 1, 2,  8, 1, 1, 0},
+	{ 1, 3, 12, 1, 1, 0},
+	{ 2, 2,  4, 1, 1, 0},
+	{ 2, 3,  6, 1, 1, 0},
+	{ 3, 2,  1, 3, 8, 4},
+	{ 3, 3,  4, 1, 1, 0},
+	{ 4, 2,  2, 1, 1, 0},
+	{ 4, 3,  3, 1, 1, 0},
+};
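+
+/*
+ * Reading the table above by struct field order (lanes, bpp,
+ * pll_digital_posDiv, pclk_m, pclk_n, pclk_d): for example, a 4-lane
+ * RGB888 configuration (lanes = 4, bpp = 3) maps to pll_digital_posDiv = 3
+ * with a pixel clock M/N/D of 1/1/0.
+ */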
+
+struct dsi_clk_desc {
+	u32 src;
+	u32 m;
+	u32 n;
+	u32 d;
+	u32 mnd_mode;
+	u32 pre_div_func;
+};
+
+#define DSI_HOST_HDR_SIZE	4
+#define DSI_HDR_LAST		BIT(31)
+#define DSI_HDR_LONG_PKT	BIT(30)
+#define DSI_HDR_BTA		BIT(29)
+#define DSI_HDR_VC(vc)		(((vc) & 0x03) << 22)
+#define DSI_HDR_DTYPE(dtype)	(((dtype) & 0x03f) << 16)
+#define DSI_HDR_DATA2(data)	(((data) & 0x0ff) << 8)
+#define DSI_HDR_DATA1(data)	((data) & 0x0ff)
+#define DSI_HDR_WC(wc)		((wc) & 0x0ffff)
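+
+/*
+ * Sketch of how the header macros compose a host header word, mirroring
+ * mdss_dsi_dcs_lwrite() in mdss_dsi_host.c (example values only):
+ *
+ *	u32 hdr = 0;
+ *	hdr |= DSI_HDR_WC(5);			(payload word count)
+ *	hdr |= DSI_HDR_VC(0);			(virtual channel 0)
+ *	hdr |= DSI_HDR_LONG_PKT;
+ *	hdr |= DSI_HDR_DTYPE(DTYPE_DCS_LWRITE);
+ *	hdr |= DSI_HDR_LAST;			(last command in the chain)
+ */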
+
+#define DSI_BUF_SIZE	1024
+#define MDSS_DSI_MRPS	0x04  /* Maximum Return Packet Size */
+
+#define MDSS_DSI_LEN 8 /* 4 x 4 - 6 - 2, bytes dcs header+crc-align  */
+
+struct dsi_buf {
+	u32 *hdr;	/* dsi host header */
+	char *start;	/* buffer start addr */
+	char *end;	/* buffer end addr */
+	int size;	/* size of buffer */
+	char *data;	/* buffer */
+	int len;	/* data length */
+	dma_addr_t dmap; /* mapped dma addr */
+};
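+
+/*
+ * Typical dsi_buf lifecycle, based on the helpers declared at the end of
+ * this header (tx_buf, cmd and pdata are illustrative names):
+ *
+ *	struct dsi_buf tx_buf;
+ *	mdss_dsi_buf_alloc(&tx_buf, DSI_BUF_SIZE);   allocate backing storage
+ *	mdss_dsi_buf_init(&tx_buf);                  reset data/len pointers
+ *	mdss_dsi_cmd_dma_add(&tx_buf, cmd);          pack one dsi_cmd_desc
+ *	mdss_dsi_cmd_dma_tx(&tx_buf, pdata);         DMA it to the DSI host
+ */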
+
+/* dcs read/write */
+#define DTYPE_DCS_WRITE		0x05	/* short write, 0 parameter */
+#define DTYPE_DCS_WRITE1	0x15	/* short write, 1 parameter */
+#define DTYPE_DCS_READ		0x06	/* read */
+#define DTYPE_DCS_LWRITE	0x39	/* long write */
+
+/* generic read/write */
+#define DTYPE_GEN_WRITE		0x03	/* short write, 0 parameter */
+#define DTYPE_GEN_WRITE1	0x13	/* short write, 1 parameter */
+#define DTYPE_GEN_WRITE2	0x23	/* short write, 2 parameter */
+#define DTYPE_GEN_LWRITE	0x29	/* long write */
+#define DTYPE_GEN_READ		0x04	/* long read, 0 parameter */
+#define DTYPE_GEN_READ1		0x14	/* long read, 1 parameter */
+#define DTYPE_GEN_READ2		0x24	/* long read, 2 parameter */
+
+#define DTYPE_TEAR_ON		0x35	/* set tear on */
+#define DTYPE_MAX_PKTSIZE	0x37	/* set max packet size */
+#define DTYPE_NULL_PKT		0x09	/* null packet, no data */
+#define DTYPE_BLANK_PKT		0x19	/* blanking packet, no data */
+
+#define DTYPE_CM_ON		0x02	/* color mode off */
+#define DTYPE_CM_OFF		0x12	/* color mode on */
+#define DTYPE_PERIPHERAL_OFF	0x22
+#define DTYPE_PERIPHERAL_ON	0x32
+
+/*
+ * dcs response
+ */
+#define DTYPE_ACK_ERR_RESP      0x02
+#define DTYPE_EOT_RESP          0x08    /* end of tx */
+#define DTYPE_GEN_READ1_RESP    0x11    /* 1 parameter, short */
+#define DTYPE_GEN_READ2_RESP    0x12    /* 2 parameter, short */
+#define DTYPE_GEN_LREAD_RESP    0x1a
+#define DTYPE_DCS_LREAD_RESP    0x1c
+#define DTYPE_DCS_READ1_RESP    0x21    /* 1 parameter, short */
+#define DTYPE_DCS_READ2_RESP    0x22    /* 2 parameter, short */
+
+struct dsi_cmd_desc {
+	int dtype;
+	int last;
+	int vc;
+	int ack;	/* ask ACK from peripheral */
+	int wait;
+	int dlen;
+	char *payload;
+};
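+
+/*
+ * Example initializer (illustrative; it mirrors the pkt_size_cmd table in
+ * mdss_dsi_host.c): a max-packet-size command with a 2 byte payload, sent
+ * on virtual channel 0 as the last command, with no ACK and no wait:
+ *
+ *	static char pktsize[2] = {MDSS_DSI_MRPS, 0x00};
+ *	static struct dsi_cmd_desc cmd = {
+ *		DTYPE_MAX_PKTSIZE, 1, 0, 0, 0, sizeof(pktsize), pktsize};
+ */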
+
+struct dsi_kickoff_action {
+	struct list_head act_entry;
+	void (*action) (void *);
+	void *data;
+};
+
+struct mdss_panel_common_pdata {
+	struct mdss_panel_info panel_info;
+	int (*on) (struct mdss_panel_data *pdata);
+	int (*off) (struct mdss_panel_data *pdata);
+};
+
+int dsi_panel_device_register(struct platform_device *pdev,
+			      struct mdss_panel_common_pdata *panel_data);
+
+char *mdss_dsi_buf_reserve_hdr(struct dsi_buf *dp, int hlen);
+char *mdss_dsi_buf_init(struct dsi_buf *dp);
+void mdss_dsi_init(void);
+int mdss_dsi_buf_alloc(struct dsi_buf *, int size);
+int mdss_dsi_cmd_dma_add(struct dsi_buf *dp, struct dsi_cmd_desc *cm);
+int mdss_dsi_cmds_tx(struct mdss_panel_data *pdata,
+		struct dsi_buf *dp, struct dsi_cmd_desc *cmds, int cnt);
+
+int mdss_dsi_cmd_dma_tx(struct dsi_buf *dp,
+				struct mdss_panel_data *pdata);
+int mdss_dsi_cmd_reg_tx(u32 data,
+				struct mdss_panel_data *pdata);
+int mdss_dsi_cmds_rx(struct mdss_panel_data *pdata,
+			struct dsi_buf *tp, struct dsi_buf *rp,
+			struct dsi_cmd_desc *cmds, int len);
+int mdss_dsi_cmd_dma_rx(struct dsi_buf *tp, int rlen,
+				struct mdss_panel_data *pdata);
+void mdss_dsi_host_init(struct mipi_panel_info *pinfo,
+				struct mdss_panel_data *pdata);
+void mdss_dsi_op_mode_config(int mode,
+				struct mdss_panel_data *pdata);
+void mdss_dsi_cmd_mode_ctrl(int enable);
+void mdp4_dsi_cmd_trigger(void);
+void mdss_dsi_cmd_mdp_start(void);
+void mdss_dsi_cmd_bta_sw_trigger(struct mdss_panel_data *pdata);
+void mdss_dsi_ack_err_status(unsigned char *dsi_base);
+void mdss_dsi_clk_enable(void);
+void mdss_dsi_clk_disable(void);
+void mdss_dsi_controller_cfg(int enable,
+				struct mdss_panel_data *pdata);
+void mdss_dsi_sw_reset(struct mdss_panel_data *pdata);
+
+irqreturn_t mdss_dsi_isr(int irq, void *ptr);
+
+void mipi_set_tx_power_mode(int mode, struct mdss_panel_data *pdata);
+int mdss_dsi_clk_div_config(u8 bpp, u8 lanes,
+			    u32 *expected_dsi_pclk);
+int mdss_dsi_clk_init(struct platform_device *pdev);
+void mdss_dsi_clk_deinit(struct device *dev);
+void mdss_dsi_prepare_clocks(void);
+void mdss_dsi_unprepare_clocks(void);
+void cont_splash_clk_ctrl(int enable);
+unsigned char *mdss_dsi_get_base_adr(void);
+
+#endif /* MDSS_DSI_H */
diff --git a/drivers/video/msm/mdss/mdss_dsi_host.c b/drivers/video/msm/mdss/mdss_dsi_host.c
new file mode 100644
index 0000000..7bc0105
--- /dev/null
+++ b/drivers/video/msm/mdss/mdss_dsi_host.c
@@ -0,0 +1,1259 @@
+
+/* Copyright (c) 2012, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/spinlock.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/dma-mapping.h>
+#include <linux/slab.h>
+#include <linux/iopoll.h>
+
+#include "mdss.h"
+#include "mdss_dsi.h"
+
+static struct completion dsi_dma_comp;
+static int dsi_irq_enabled;
+static spinlock_t dsi_irq_lock;
+static spinlock_t dsi_mdp_lock;
+static int dsi_mdp_busy;
+
+spinlock_t dsi_clk_lock;
+
+struct mdss_hw mdss_dsi_hw = {
+	.hw_ndx = MDSS_HW_DSI0,
+	.irq_handler = mdss_dsi_isr,
+};
+
+void mdss_dsi_init(void)
+{
+	init_completion(&dsi_dma_comp);
+	spin_lock_init(&dsi_irq_lock);
+	spin_lock_init(&dsi_mdp_lock);
+	spin_lock_init(&dsi_clk_lock);
+}
+
+void mdss_dsi_enable_irq(void)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&dsi_irq_lock, flags);
+	if (dsi_irq_enabled) {
+		pr_debug("%s: IRQ already enabled\n", __func__);
+		spin_unlock_irqrestore(&dsi_irq_lock, flags);
+		return;
+	}
+	mdss_enable_irq(&mdss_dsi_hw);
+	dsi_irq_enabled = 1;
+	/* TO DO: Check whether MDSS IRQ is enabled */
+	spin_unlock_irqrestore(&dsi_irq_lock, flags);
+}
+
+void mdss_dsi_disable_irq(void)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&dsi_irq_lock, flags);
+	if (dsi_irq_enabled == 0) {
+		pr_debug("%s: IRQ already disabled\n", __func__);
+		spin_unlock_irqrestore(&dsi_irq_lock, flags);
+		return;
+	}
+	mdss_disable_irq(&mdss_dsi_hw);
+	dsi_irq_enabled = 0;
+	/* TO DO: Check whether MDSS IRQ is Disabled */
+	spin_unlock_irqrestore(&dsi_irq_lock, flags);
+}
+
+/*
+ * mdss_dsi_disable_irq_nosync() should be called
+ * from interrupt context
+ */
+void mdss_dsi_disable_irq_nosync(void)
+{
+	spin_lock(&dsi_irq_lock);
+	if (dsi_irq_enabled == 0) {
+		pr_debug("%s: IRQ cannot be disabled\n", __func__);
+		spin_unlock(&dsi_irq_lock);
+		return;
+	}
+
+	dsi_irq_enabled = 0;
+	spin_unlock(&dsi_irq_lock);
+}
+
+/*
+ * mipi dsi buf mechanism
+ */
+char *mdss_dsi_buf_reserve(struct dsi_buf *dp, int len)
+{
+	dp->data += len;
+	return dp->data;
+}
+
+char *mdss_dsi_buf_unreserve(struct dsi_buf *dp, int len)
+{
+	dp->data -= len;
+	return dp->data;
+}
+
+char *mdss_dsi_buf_push(struct dsi_buf *dp, int len)
+{
+	dp->data -= len;
+	dp->len += len;
+	return dp->data;
+}
+
+char *mdss_dsi_buf_reserve_hdr(struct dsi_buf *dp, int hlen)
+{
+	dp->hdr = (u32 *)dp->data;
+	return mdss_dsi_buf_reserve(dp, hlen);
+}
+
+char *mdss_dsi_buf_init(struct dsi_buf *dp)
+{
+	int off;
+
+	dp->data = dp->start;
+	off = (int)dp->data;
+	/* 8 byte align */
+	off &= 0x07;
+	if (off)
+		off = 8 - off;
+	dp->data += off;
+	dp->len = 0;
+	return dp->data;
+}
+
+int mdss_dsi_buf_alloc(struct dsi_buf *dp, int size)
+{
+
+	dp->start = kmalloc(size, GFP_KERNEL);
+	if (dp->start == NULL) {
+		pr_err("%s:%u\n", __func__, __LINE__);
+		return -ENOMEM;
+	}
+
+	dp->end = dp->start + size;
+	dp->size = size;
+
+	if ((int)dp->start & 0x07)
+		pr_err("%s: buf NOT 8 bytes aligned\n", __func__);
+
+	dp->data = dp->start;
+	dp->len = 0;
+	return size;
+}
+
+/*
+ * mipi dsi generic long write
+ */
+static int mdss_dsi_generic_lwrite(struct dsi_buf *dp, struct dsi_cmd_desc *cm)
+{
+	char *bp;
+	u32 *hp;
+	int i, len;
+
+	bp = mdss_dsi_buf_reserve_hdr(dp, DSI_HOST_HDR_SIZE);
+
+	/* fill up payload */
+	if (cm->payload) {
+		len = cm->dlen;
+		len += 3;
+		len &= ~0x03;	/* round up to a multiple of 4 */
+		for (i = 0; i < cm->dlen; i++)
+			*bp++ = cm->payload[i];
+
+		/* append 0xff to the end */
+		for (; i < len; i++)
+			*bp++ = 0xff;
+
+		dp->len += len;
+	}
+
+	/* fill up header */
+	hp = dp->hdr;
+	*hp = 0;
+	*hp = DSI_HDR_WC(cm->dlen);
+	*hp |= DSI_HDR_VC(cm->vc);
+	*hp |= DSI_HDR_LONG_PKT;
+	*hp |= DSI_HDR_DTYPE(DTYPE_GEN_LWRITE);
+	if (cm->last)
+		*hp |= DSI_HDR_LAST;
+
+	mdss_dsi_buf_push(dp, DSI_HOST_HDR_SIZE);
+
+	return dp->len;
+}
+
+/*
+ * mipi dsi generic short write with 0, 1 or 2 parameters
+ */
+static int mdss_dsi_generic_swrite(struct dsi_buf *dp, struct dsi_cmd_desc *cm)
+{
+	u32 *hp;
+	int len;
+
+	if (cm->dlen && cm->payload == 0) {
+		pr_err("%s: NO payload error\n", __func__);
+		return 0;
+	}
+
+	mdss_dsi_buf_reserve_hdr(dp, DSI_HOST_HDR_SIZE);
+	hp = dp->hdr;
+	*hp = 0;
+	*hp |= DSI_HDR_VC(cm->vc);
+	if (cm->last)
+		*hp |= DSI_HDR_LAST;
+
+
+	len = (cm->dlen > 2) ? 2 : cm->dlen;
+
+	if (len == 1) {
+		*hp |= DSI_HDR_DTYPE(DTYPE_GEN_WRITE1);
+		*hp |= DSI_HDR_DATA1(cm->payload[0]);
+		*hp |= DSI_HDR_DATA2(0);
+	} else if (len == 2) {
+		*hp |= DSI_HDR_DTYPE(DTYPE_GEN_WRITE2);
+		*hp |= DSI_HDR_DATA1(cm->payload[0]);
+		*hp |= DSI_HDR_DATA2(cm->payload[1]);
+	} else {
+		*hp |= DSI_HDR_DTYPE(DTYPE_GEN_WRITE);
+		*hp |= DSI_HDR_DATA1(0);
+		*hp |= DSI_HDR_DATA2(0);
+	}
+
+	mdss_dsi_buf_push(dp, DSI_HOST_HDR_SIZE);
+
+	return dp->len;	/* 4 bytes */
+}
+
+/*
+ * mipi dsi generic read with 0, 1 or 2 parameters
+ */
+static int mdss_dsi_generic_read(struct dsi_buf *dp, struct dsi_cmd_desc *cm)
+{
+	u32 *hp;
+	int len;
+
+	if (cm->dlen && cm->payload == 0) {
+		pr_err("%s: NO payload error\n", __func__);
+		return 0;
+	}
+
+	mdss_dsi_buf_reserve_hdr(dp, DSI_HOST_HDR_SIZE);
+	hp = dp->hdr;
+	*hp = 0;
+	*hp |= DSI_HDR_VC(cm->vc);
+	*hp |= DSI_HDR_BTA;
+	if (cm->last)
+		*hp |= DSI_HDR_LAST;
+
+	len = (cm->dlen > 2) ? 2 : cm->dlen;
+
+	if (len == 1) {
+		*hp |= DSI_HDR_DTYPE(DTYPE_GEN_READ1);
+		*hp |= DSI_HDR_DATA1(cm->payload[0]);
+		*hp |= DSI_HDR_DATA2(0);
+	} else if (len == 2) {
+		*hp |= DSI_HDR_DTYPE(DTYPE_GEN_READ2);
+		*hp |= DSI_HDR_DATA1(cm->payload[0]);
+		*hp |= DSI_HDR_DATA2(cm->payload[1]);
+	} else {
+		*hp |= DSI_HDR_DTYPE(DTYPE_GEN_READ);
+		*hp |= DSI_HDR_DATA1(0);
+		*hp |= DSI_HDR_DATA2(0);
+	}
+
+	mdss_dsi_buf_push(dp, DSI_HOST_HDR_SIZE);
+	return dp->len;	/* 4 bytes */
+}
+
+/*
+ * mipi dsi dcs long write
+ */
+static int mdss_dsi_dcs_lwrite(struct dsi_buf *dp, struct dsi_cmd_desc *cm)
+{
+	char *bp;
+	u32 *hp;
+	int i, len;
+
+	bp = mdss_dsi_buf_reserve_hdr(dp, DSI_HOST_HDR_SIZE);
+
+	/*
+	 * fill up payload
+	 * dcs command byte (first byte) followed by payload
+	 */
+	if (cm->payload) {
+		len = cm->dlen;
+		len += 3;
+		len &= ~0x03;	/* round up to a multiple of 4 */
+		for (i = 0; i < cm->dlen; i++)
+			*bp++ = cm->payload[i];
+
+		/* append 0xff to the end */
+		for (; i < len; i++)
+			*bp++ = 0xff;
+
+		dp->len += len;
+	}
+
+	/* fill up header */
+	hp = dp->hdr;
+	*hp = 0;
+	*hp = DSI_HDR_WC(cm->dlen);
+	*hp |= DSI_HDR_VC(cm->vc);
+	*hp |= DSI_HDR_LONG_PKT;
+	*hp |= DSI_HDR_DTYPE(DTYPE_DCS_LWRITE);
+	if (cm->last)
+		*hp |= DSI_HDR_LAST;
+
+	mdss_dsi_buf_push(dp, DSI_HOST_HDR_SIZE);
+
+	return dp->len;
+}
+
+/*
+ * mipi dsi dcs short write with 0 parameters
+ */
+static int mdss_dsi_dcs_swrite(struct dsi_buf *dp, struct dsi_cmd_desc *cm)
+{
+	u32 *hp;
+	int len;
+
+	if (cm->payload == 0) {
+		pr_err("%s: NO payload error\n", __func__);
+		return -EINVAL;
+	}
+
+	mdss_dsi_buf_reserve_hdr(dp, DSI_HOST_HDR_SIZE);
+	hp = dp->hdr;
+	*hp = 0;
+	*hp |= DSI_HDR_VC(cm->vc);
+	if (cm->ack)		/* ask ACK trigger msg from peripheral */
+		*hp |= DSI_HDR_BTA;
+	if (cm->last)
+		*hp |= DSI_HDR_LAST;
+
+	len = (cm->dlen > 1) ? 1 : cm->dlen;
+
+	*hp |= DSI_HDR_DTYPE(DTYPE_DCS_WRITE);
+	*hp |= DSI_HDR_DATA1(cm->payload[0]);	/* dcs command byte */
+	*hp |= DSI_HDR_DATA2(0);
+
+	mdss_dsi_buf_push(dp, DSI_HOST_HDR_SIZE);
+	return dp->len;
+}
+
+/*
+ * mipi dsi dcs short write with 1 parameter
+ */
+static int mdss_dsi_dcs_swrite1(struct dsi_buf *dp, struct dsi_cmd_desc *cm)
+{
+	u32 *hp;
+
+	if (cm->dlen < 2 || cm->payload == 0) {
+		pr_err("%s: NO payload error\n", __func__);
+		return -EINVAL;
+	}
+
+	mdss_dsi_buf_reserve_hdr(dp, DSI_HOST_HDR_SIZE);
+	hp = dp->hdr;
+	*hp = 0;
+	*hp |= DSI_HDR_VC(cm->vc);
+	if (cm->ack)		/* ask ACK trigger msg from peripheral */
+		*hp |= DSI_HDR_BTA;
+	if (cm->last)
+		*hp |= DSI_HDR_LAST;
+
+	*hp |= DSI_HDR_DTYPE(DTYPE_DCS_WRITE1);
+	*hp |= DSI_HDR_DATA1(cm->payload[0]);	/* dcs command byte */
+	*hp |= DSI_HDR_DATA2(cm->payload[1]);	/* parameter */
+
+	mdss_dsi_buf_push(dp, DSI_HOST_HDR_SIZE);
+
+	return dp->len;
+}
+
+/*
+ * mipi dsi dcs read with 0 parameters
+ */
+static int mdss_dsi_dcs_read(struct dsi_buf *dp, struct dsi_cmd_desc *cm)
+{
+	u32 *hp;
+
+	if (cm->payload == 0) {
+		pr_err("%s: NO payload error\n", __func__);
+		return -EINVAL;
+	}
+
+	mdss_dsi_buf_reserve_hdr(dp, DSI_HOST_HDR_SIZE);
+	hp = dp->hdr;
+	*hp = 0;
+	*hp |= DSI_HDR_VC(cm->vc);
+	*hp |= DSI_HDR_BTA;
+	*hp |= DSI_HDR_DTYPE(DTYPE_DCS_READ);
+	if (cm->last)
+		*hp |= DSI_HDR_LAST;
+
+	*hp |= DSI_HDR_DATA1(cm->payload[0]);	/* dcs command byte */
+	*hp |= DSI_HDR_DATA2(0);
+
+	mdss_dsi_buf_push(dp, DSI_HOST_HDR_SIZE);
+
+	return dp->len;	/* 4 bytes */
+}
+
+static int mdss_dsi_cm_on(struct dsi_buf *dp, struct dsi_cmd_desc *cm)
+{
+	u32 *hp;
+
+	mdss_dsi_buf_reserve_hdr(dp, DSI_HOST_HDR_SIZE);
+	hp = dp->hdr;
+	*hp = 0;
+	*hp |= DSI_HDR_VC(cm->vc);
+	*hp |= DSI_HDR_DTYPE(DTYPE_CM_ON);
+	if (cm->last)
+		*hp |= DSI_HDR_LAST;
+
+	mdss_dsi_buf_push(dp, DSI_HOST_HDR_SIZE);
+
+	return dp->len;	/* 4 bytes */
+}
+
+static int mdss_dsi_cm_off(struct dsi_buf *dp, struct dsi_cmd_desc *cm)
+{
+	u32 *hp;
+
+	mdss_dsi_buf_reserve_hdr(dp, DSI_HOST_HDR_SIZE);
+	hp = dp->hdr;
+	*hp = 0;
+	*hp |= DSI_HDR_VC(cm->vc);
+	*hp |= DSI_HDR_DTYPE(DTYPE_CM_OFF);
+	if (cm->last)
+		*hp |= DSI_HDR_LAST;
+
+	mdss_dsi_buf_push(dp, DSI_HOST_HDR_SIZE);
+
+	return dp->len;	/* 4 bytes */
+}
+
+static int mdss_dsi_peripheral_on(struct dsi_buf *dp, struct dsi_cmd_desc *cm)
+{
+	u32 *hp;
+
+	mdss_dsi_buf_reserve_hdr(dp, DSI_HOST_HDR_SIZE);
+	hp = dp->hdr;
+	*hp = 0;
+	*hp |= DSI_HDR_VC(cm->vc);
+	*hp |= DSI_HDR_DTYPE(DTYPE_PERIPHERAL_ON);
+	if (cm->last)
+		*hp |= DSI_HDR_LAST;
+
+	mdss_dsi_buf_push(dp, DSI_HOST_HDR_SIZE);
+
+	return dp->len;	/* 4 bytes */
+}
+
+static int mdss_dsi_peripheral_off(struct dsi_buf *dp, struct dsi_cmd_desc *cm)
+{
+	u32 *hp;
+
+	mdss_dsi_buf_reserve_hdr(dp, DSI_HOST_HDR_SIZE);
+	hp = dp->hdr;
+	*hp = 0;
+	*hp |= DSI_HDR_VC(cm->vc);
+	*hp |= DSI_HDR_DTYPE(DTYPE_PERIPHERAL_OFF);
+	if (cm->last)
+		*hp |= DSI_HDR_LAST;
+
+	mdss_dsi_buf_push(dp, DSI_HOST_HDR_SIZE);
+
+	return dp->len;	/* 4 bytes */
+}
+
+static int mdss_dsi_set_max_pktsize(struct dsi_buf *dp, struct dsi_cmd_desc *cm)
+{
+	u32 *hp;
+
+	if (cm->payload == 0) {
+		pr_err("%s: NO payload error\n", __func__);
+		return 0;
+	}
+
+	mdss_dsi_buf_reserve_hdr(dp, DSI_HOST_HDR_SIZE);
+	hp = dp->hdr;
+	*hp = 0;
+	*hp |= DSI_HDR_VC(cm->vc);
+	*hp |= DSI_HDR_DTYPE(DTYPE_MAX_PKTSIZE);
+	if (cm->last)
+		*hp |= DSI_HDR_LAST;
+
+	*hp |= DSI_HDR_DATA1(cm->payload[0]);
+	*hp |= DSI_HDR_DATA2(cm->payload[1]);
+
+	mdss_dsi_buf_push(dp, DSI_HOST_HDR_SIZE);
+
+	return dp->len;	/* 4 bytes */
+}
+
+static int mdss_dsi_null_pkt(struct dsi_buf *dp, struct dsi_cmd_desc *cm)
+{
+	u32 *hp;
+
+	mdss_dsi_buf_reserve_hdr(dp, DSI_HOST_HDR_SIZE);
+	hp = dp->hdr;
+	*hp = 0;
+	*hp = DSI_HDR_WC(cm->dlen);
+	*hp |= DSI_HDR_LONG_PKT;
+	*hp |= DSI_HDR_VC(cm->vc);
+	*hp |= DSI_HDR_DTYPE(DTYPE_NULL_PKT);
+	if (cm->last)
+		*hp |= DSI_HDR_LAST;
+
+	mdss_dsi_buf_push(dp, DSI_HOST_HDR_SIZE);
+
+	return dp->len;	/* 4 bytes */
+}
+
+static int mdss_dsi_blank_pkt(struct dsi_buf *dp, struct dsi_cmd_desc *cm)
+{
+	u32 *hp;
+
+	mdss_dsi_buf_reserve_hdr(dp, DSI_HOST_HDR_SIZE);
+	hp = dp->hdr;
+	*hp = 0;
+	*hp = DSI_HDR_WC(cm->dlen);
+	*hp |= DSI_HDR_LONG_PKT;
+	*hp |= DSI_HDR_VC(cm->vc);
+	*hp |= DSI_HDR_DTYPE(DTYPE_BLANK_PKT);
+	if (cm->last)
+		*hp |= DSI_HDR_LAST;
+
+	mdss_dsi_buf_push(dp, DSI_HOST_HDR_SIZE);
+
+	return dp->len;	/* 4 bytes */
+}
+
+/*
+ * prepare cmd buffer to be txed
+ */
+int mdss_dsi_cmd_dma_add(struct dsi_buf *dp, struct dsi_cmd_desc *cm)
+{
+	int len = 0;
+
+	switch (cm->dtype) {
+	case DTYPE_GEN_WRITE:
+	case DTYPE_GEN_WRITE1:
+	case DTYPE_GEN_WRITE2:
+		len = mdss_dsi_generic_swrite(dp, cm);
+		break;
+	case DTYPE_GEN_LWRITE:
+		len = mdss_dsi_generic_lwrite(dp, cm);
+		break;
+	case DTYPE_GEN_READ:
+	case DTYPE_GEN_READ1:
+	case DTYPE_GEN_READ2:
+		len = mdss_dsi_generic_read(dp, cm);
+		break;
+	case DTYPE_DCS_LWRITE:
+		len = mdss_dsi_dcs_lwrite(dp, cm);
+		break;
+	case DTYPE_DCS_WRITE:
+		len = mdss_dsi_dcs_swrite(dp, cm);
+		break;
+	case DTYPE_DCS_WRITE1:
+		len = mdss_dsi_dcs_swrite1(dp, cm);
+		break;
+	case DTYPE_DCS_READ:
+		len = mdss_dsi_dcs_read(dp, cm);
+		break;
+	case DTYPE_MAX_PKTSIZE:
+		len = mdss_dsi_set_max_pktsize(dp, cm);
+		break;
+	case DTYPE_NULL_PKT:
+		len = mdss_dsi_null_pkt(dp, cm);
+		break;
+	case DTYPE_BLANK_PKT:
+		len = mdss_dsi_blank_pkt(dp, cm);
+		break;
+	case DTYPE_CM_ON:
+		len = mdss_dsi_cm_on(dp, cm);
+		break;
+	case DTYPE_CM_OFF:
+		len = mdss_dsi_cm_off(dp, cm);
+		break;
+	case DTYPE_PERIPHERAL_ON:
+		len = mdss_dsi_peripheral_on(dp, cm);
+		break;
+	case DTYPE_PERIPHERAL_OFF:
+		len = mdss_dsi_peripheral_off(dp, cm);
+		break;
+	default:
+		pr_debug("%s: dtype=%x NOT supported\n",
+					__func__, cm->dtype);
+		break;
+
+	}
+
+	return len;
+}
+
+/*
+ * mdss_dsi_short_read1_resp: 1 parameter
+ */
+static int mdss_dsi_short_read1_resp(struct dsi_buf *rp)
+{
+	/* strip out dcs type */
+	rp->data++;
+	rp->len = 1;
+	return rp->len;
+}
+
+/*
+ * mdss_dsi_short_read2_resp: 2 parameter
+ */
+static int mdss_dsi_short_read2_resp(struct dsi_buf *rp)
+{
+	/* strip out dcs type */
+	rp->data++;
+	rp->len = 2;
+	return rp->len;
+}
+
+static int mdss_dsi_long_read_resp(struct dsi_buf *rp)
+{
+	short len;
+
+	len = rp->data[2];
+	len <<= 8;
+	len |= rp->data[1];
+	/* strip out dcs header */
+	rp->data += 4;
+	rp->len -= 4;
+	/* strip out 2 bytes of checksum */
+	rp->len -= 2;
+	return len;
+}
+
+void mdss_dsi_host_init(struct mipi_panel_info *pinfo,
+				struct mdss_panel_data *pdata)
+{
+	u32 dsi_ctrl, intr_ctrl;
+	u32 data;
+
+	pinfo->rgb_swap = DSI_RGB_SWAP_RGB;
+
+	if (pinfo->mode == DSI_VIDEO_MODE) {
+		data = 0;
+		if (pinfo->pulse_mode_hsa_he)
+			data |= BIT(28);
+		if (pinfo->hfp_power_stop)
+			data |= BIT(24);
+		if (pinfo->hbp_power_stop)
+			data |= BIT(20);
+		if (pinfo->hsa_power_stop)
+			data |= BIT(16);
+		if (pinfo->eof_bllp_power_stop)
+			data |= BIT(15);
+		if (pinfo->bllp_power_stop)
+			data |= BIT(12);
+		data |= ((pinfo->traffic_mode & 0x03) << 8);
+		data |= ((pinfo->dst_format & 0x03) << 4); /* 2 bits */
+		data |= (pinfo->vc & 0x03);
+		MIPI_OUTP((pdata->dsi_base) + 0x0010, data);
+
+		data = 0;
+		data |= ((pinfo->rgb_swap & 0x07) << 12);
+		if (pinfo->b_sel)
+			data |= BIT(8);
+		if (pinfo->g_sel)
+			data |= BIT(4);
+		if (pinfo->r_sel)
+			data |= BIT(0);
+		MIPI_OUTP((pdata->dsi_base) + 0x0020, data);
+	} else if (pinfo->mode == DSI_CMD_MODE) {
+		data = 0;
+		data |= ((pinfo->interleave_max & 0x0f) << 20);
+		data |= ((pinfo->rgb_swap & 0x07) << 16);
+		if (pinfo->b_sel)
+			data |= BIT(12);
+		if (pinfo->g_sel)
+			data |= BIT(8);
+		if (pinfo->r_sel)
+			data |= BIT(4);
+		data |= (pinfo->dst_format & 0x0f);	/* 4 bits */
+		MIPI_OUTP((pdata->dsi_base) + 0x003c, data);
+
+		/* DSI_COMMAND_MODE_MDP_DCS_CMD_CTRL */
+		data = pinfo->wr_mem_continue & 0x0ff;
+		data <<= 8;
+		data |= (pinfo->wr_mem_start & 0x0ff);
+		if (pinfo->insert_dcs_cmd)
+			data |= BIT(16);
+		MIPI_OUTP((pdata->dsi_base) + 0x0044, data);
+	} else
+		pr_err("%s: Unknown DSI mode=%d\n", __func__, pinfo->mode);
+
+	dsi_ctrl = BIT(8) | BIT(2);	/* clock enable & cmd mode */
+	intr_ctrl = 0;
+	intr_ctrl = (DSI_INTR_CMD_DMA_DONE_MASK | DSI_INTR_CMD_MDP_DONE_MASK);
+
+	if (pinfo->crc_check)
+		dsi_ctrl |= BIT(24);
+	if (pinfo->ecc_check)
+		dsi_ctrl |= BIT(20);
+	if (pinfo->data_lane3)
+		dsi_ctrl |= BIT(7);
+	if (pinfo->data_lane2)
+		dsi_ctrl |= BIT(6);
+	if (pinfo->data_lane1)
+		dsi_ctrl |= BIT(5);
+	if (pinfo->data_lane0)
+		dsi_ctrl |= BIT(4);
+
+	/* from frame buffer, low power mode */
+	/* DSI_COMMAND_MODE_DMA_CTRL */
+	MIPI_OUTP((pdata->dsi_base) + 0x3C, 0x14000000);
+
+	data = 0;
+	if (pinfo->te_sel)
+		data |= BIT(31);
+	data |= pinfo->mdp_trigger << 4;/* cmd mdp trigger */
+	data |= pinfo->dma_trigger;	/* cmd dma trigger */
+	data |= (pinfo->stream & 0x01) << 8;
+	MIPI_OUTP((pdata->dsi_base) + 0x0084, data); /* DSI_TRIG_CTRL */
+
+	/* DSI_LAN_SWAP_CTRL */
+	MIPI_OUTP((pdata->dsi_base) + 0x00b0, pinfo->dlane_swap);
+
+	/* clock out ctrl */
+	data = pinfo->t_clk_post & 0x3f;	/* 6 bits */
+	data <<= 8;
+	data |= pinfo->t_clk_pre & 0x3f;	/*  6 bits */
+	/* DSI_CLKOUT_TIMING_CTRL */
+	MIPI_OUTP((pdata->dsi_base) + 0xc4, data);
+
+	data = 0;
+	if (pinfo->rx_eot_ignore)
+		data |= BIT(4);
+	if (pinfo->tx_eot_append)
+		data |= BIT(0);
+	MIPI_OUTP((pdata->dsi_base) + 0x00cc, data); /* DSI_EOT_PACKET_CTRL */
+
+
+	/* allow only ack-err-status  to generate interrupt */
+	/* DSI_ERR_INT_MASK0 */
+	MIPI_OUTP((pdata->dsi_base) + 0x010c, 0x13ff3fe0);
+
+	intr_ctrl |= DSI_INTR_ERROR_MASK;
+	MIPI_OUTP((pdata->dsi_base) + 0x0110, intr_ctrl); /* DSI_INTL_CTRL */
+
+	/* turn esc, byte, dsi, pclk, sclk, hclk on */
+	MIPI_OUTP((pdata->dsi_base) + 0x11c, 0x23f); /* DSI_CLK_CTRL */
+
+	dsi_ctrl |= BIT(0);	/* enable dsi */
+	MIPI_OUTP((pdata->dsi_base) + 0x0004, dsi_ctrl);
+
+	wmb();
+}
+
+void mipi_set_tx_power_mode(int mode, struct mdss_panel_data *pdata)
+{
+	u32 data = MIPI_INP((pdata->dsi_base) + 0x3c);
+
+	if (mode == 0)
+		data &= ~BIT(26);
+	else
+		data |= BIT(26);
+
+	MIPI_OUTP((pdata->dsi_base) + 0x3c, data);
+}
+
+void mdss_dsi_sw_reset(struct mdss_panel_data *pdata)
+{
+	MIPI_OUTP((pdata->dsi_base) + 0x118, 0x01);
+	wmb();
+	MIPI_OUTP((pdata->dsi_base) + 0x118, 0x00);
+	wmb();
+}
+
+void mdss_dsi_controller_cfg(int enable,
+			     struct mdss_panel_data *pdata)
+{
+
+	u32 dsi_ctrl;
+	u32 status;
+	u32 sleep_us = 1000;
+	u32 timeout_us = 16000;
+
+	/* Check for CMD_MODE_DMA_BUSY */
+	if (readl_poll_timeout(((pdata->dsi_base) + 0x0008),
+			   status,
+			   ((status & 0x02) == 0),
+			       sleep_us, timeout_us))
+		pr_info("%s: DSI status=%x failed\n", __func__, status);
+
+	/* Check for x_HS_FIFO_EMPTY */
+	if (readl_poll_timeout(((pdata->dsi_base) + 0x000c),
+			   status,
+			   ((status & 0x11111000) == 0x11111000),
+			       sleep_us, timeout_us))
+		pr_info("%s: FIFO status=%x failed\n", __func__, status);
+
+	dsi_ctrl = MIPI_INP((pdata->dsi_base) + 0x0004);
+	if (enable)
+		dsi_ctrl |= 0x01;
+	else
+		dsi_ctrl &= ~0x01;
+
+	MIPI_OUTP((pdata->dsi_base) + 0x0004, dsi_ctrl);
+	wmb();
+}
+
+void mdss_dsi_op_mode_config(int mode,
+			     struct mdss_panel_data *pdata)
+{
+
+	u32 dsi_ctrl, intr_ctrl;
+
+	dsi_ctrl = MIPI_INP((pdata->dsi_base) + 0x0004);
+	dsi_ctrl &= ~0x07;
+	if (mode == DSI_VIDEO_MODE) {
+		dsi_ctrl |= 0x03;
+		intr_ctrl = DSI_INTR_CMD_DMA_DONE_MASK;
+	} else {		/* command mode */
+		dsi_ctrl |= 0x05;
+		intr_ctrl = DSI_INTR_CMD_DMA_DONE_MASK | DSI_INTR_ERROR_MASK |
+				DSI_INTR_CMD_MDP_DONE_MASK;
+	}
+
+	pr_debug("%s: dsi_ctrl=%x intr=%x\n", __func__, dsi_ctrl, intr_ctrl);
+
+	MIPI_OUTP((pdata->dsi_base) + 0x0110, intr_ctrl); /* DSI_INTL_CTRL */
+	MIPI_OUTP((pdata->dsi_base) + 0x0004, dsi_ctrl);
+	wmb();
+}
+
+void mdss_dsi_cmd_mdp_start(void)
+{
+	unsigned long flag;
+
+	spin_lock_irqsave(&dsi_mdp_lock, flag);
+	mdss_dsi_enable_irq();
+	dsi_mdp_busy = true;
+	spin_unlock_irqrestore(&dsi_mdp_lock, flag);
+}
+
+
+void mdss_dsi_cmd_bta_sw_trigger(struct mdss_panel_data *pdata)
+{
+	u32 status;
+	int timeout_us = 10000;
+
+	MIPI_OUTP((pdata->dsi_base) + 0x098, 0x01);	/* trigger */
+	wmb();
+
+	/* Check for CMD_MODE_DMA_BUSY */
+	if (readl_poll_timeout(((pdata->dsi_base) + 0x0008),
+				status, ((status & 0x0010) == 0),
+				0, timeout_us))
+		pr_info("%s: DSI status=%x failed\n", __func__, status);
+
+	mdss_dsi_ack_err_status((pdata->dsi_base));
+
+	pr_debug("%s: BTA done, status = %d\n", __func__, status);
+}
+
+int mdss_dsi_cmd_reg_tx(u32 data,
+			struct mdss_panel_data *pdata)
+{
+	int i;
+	char *bp;
+
+	bp = (char *)&data;
+	pr_debug("%s: ", __func__);
+	for (i = 0; i < 4; i++)
+		pr_debug("%x ", *bp++);
+
+	pr_debug("\n");
+
+	MIPI_OUTP((pdata->dsi_base) + 0x0084, 0x04);/* sw trigger */
+	MIPI_OUTP((pdata->dsi_base) + 0x0004, 0x135);
+
+	wmb();
+
+	MIPI_OUTP((pdata->dsi_base) + 0x03c, data);
+	wmb();
+	MIPI_OUTP((pdata->dsi_base) + 0x090, 0x01);	/* trigger */
+	wmb();
+
+	udelay(300);
+
+	return 4;
+}
+
+/*
+ * mdss_dsi_cmds_tx:
+ * ov_mutex needs to be acquired before calling this function.
+ */
+int mdss_dsi_cmds_tx(struct mdss_panel_data *pdata,
+		struct dsi_buf *tp, struct dsi_cmd_desc *cmds, int cnt)
+{
+	struct dsi_cmd_desc *cm;
+	u32 dsi_ctrl, ctrl;
+	int i, video_mode;
+	unsigned long flag;
+
+	/*
+	 * Turn on cmd mode. For video mode, do not send cmds
+	 * spanning more than one pixel line, since they are
+	 * only transmitted during the BLLP.
+	 */
+	dsi_ctrl = MIPI_INP((pdata->dsi_base) + 0x0004);
+	video_mode = dsi_ctrl & 0x02; /* VIDEO_MODE_EN */
+	if (video_mode) {
+		ctrl = dsi_ctrl | 0x04; /* CMD_MODE_EN */
+		MIPI_OUTP((pdata->dsi_base) + 0x0004, ctrl);
+	}
+
+	spin_lock_irqsave(&dsi_mdp_lock, flag);
+	mdss_dsi_enable_irq();
+	dsi_mdp_busy = true;
+	spin_unlock_irqrestore(&dsi_mdp_lock, flag);
+
+	cm = cmds;
+	mdss_dsi_buf_init(tp);
+	for (i = 0; i < cnt; i++) {
+		mdss_dsi_buf_init(tp);
+		mdss_dsi_cmd_dma_add(tp, cm);
+		mdss_dsi_cmd_dma_tx(tp, pdata);
+		if (cm->wait)
+			msleep(cm->wait);
+		cm++;
+	}
+
+	spin_lock_irqsave(&dsi_mdp_lock, flag);
+	dsi_mdp_busy = false;
+	mdss_dsi_disable_irq();
+	spin_unlock_irqrestore(&dsi_mdp_lock, flag);
+
+	if (video_mode)
+		MIPI_OUTP((pdata->dsi_base) + 0x0004, dsi_ctrl); /* restore */
+
+	return cnt;
+}
+
+/* MDSS_DSI_MRPS, Maximum Return Packet Size */
+static char max_pktsize[2] = {0x00, 0x00}; /* LSB tx first, 10 bytes */
+
+static struct dsi_cmd_desc pkt_size_cmd[] = {
+	{DTYPE_MAX_PKTSIZE, 1, 0, 0, 0,
+		sizeof(max_pktsize), max_pktsize}
+};
+
+/*
+ * The DSI panel replies with MAX_RETURN_PACKET_SIZE bytes of data
+ * plus DCS header, ECC and CRC for a DCS long read response.
+ * The mdss_dsi controller only has 4x32-bit registers (16 bytes) to
+ * hold data per transaction.
+ * MDSS_DSI_LEN equals 8.
+ * len should be either 4 or 8.
+ * Any return data larger than MDSS_DSI_LEN needs to be broken down
+ * into multiple transactions.
+ *
+ * ov_mutex needs to be acquired before calling this function.
+ */
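+/*
+ * Worked example of the length math below: for rlen = 4, len stays within
+ * MDSS_DSI_LEN, is already 4-byte aligned, and is then padded by 2, so
+ * len = 6 and cnt = len + 6 = 12 (4 byte header + 6 bytes of data +
+ * 2 byte crc).
+ */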
+int mdss_dsi_cmds_rx(struct mdss_panel_data *pdata,
+			struct dsi_buf *tp, struct dsi_buf *rp,
+			struct dsi_cmd_desc *cmds, int rlen)
+{
+	int cnt, len, diff, pkt_size;
+	unsigned long flag;
+	char cmd;
+
+	if (pdata->panel_info.mipi.no_max_pkt_size)
+		rlen = ALIGN(rlen, 4); /* Only support rlen = 4*n */
+
+	len = rlen;
+	diff = 0;
+
+	if (len <= 2)
+		cnt = 4;	/* short read */
+	else {
+		if (len > MDSS_DSI_LEN)
+			len = MDSS_DSI_LEN;	/* 8 bytes at most */
+
+		len = ALIGN(len, 4); /* len 4 bytes align */
+		diff = len - rlen;
+		/*
+		 * Add an extra 2 bytes to len so that the overall
+		 * packet size is a multiple of 4. This also makes
+		 * sure the 4-byte DCS header lands within a
+		 * 32-bit register after shifting in.
+		 * After this, len is either 6 or 10.
+		 */
+		len += 2;
+		cnt = len + 6; /* 4 bytes header + 2 bytes crc */
+	}
+
+	spin_lock_irqsave(&dsi_mdp_lock, flag);
+	mdss_dsi_enable_irq();
+	dsi_mdp_busy = true;
+	spin_unlock_irqrestore(&dsi_mdp_lock, flag);
+
+	if (!pdata->panel_info.mipi.no_max_pkt_size) {
+		/* packet size need to be set at every read */
+		pkt_size = len;
+		max_pktsize[0] = pkt_size;
+		mdss_dsi_buf_init(tp);
+		mdss_dsi_cmd_dma_add(tp, pkt_size_cmd);
+		mdss_dsi_cmd_dma_tx(tp, pdata);
+	}
+
+	mdss_dsi_buf_init(tp);
+	mdss_dsi_cmd_dma_add(tp, cmds);
+
+	/* transmit read command to client */
+	mdss_dsi_cmd_dma_tx(tp, pdata);
+	/*
+	 * once cmd_dma_done interrupt received,
+	 * return data from client is ready and stored
+	 * at RDBK_DATA register already
+	 */
+	mdss_dsi_buf_init(rp);
+	if (pdata->panel_info.mipi.no_max_pkt_size) {
+		/*
+		 * expect rlen = n * 4
+		 * short alignment for the start addr
+		 */
+		rp->data += 2;
+	}
+
+	mdss_dsi_cmd_dma_rx(rp, cnt, pdata);
+
+	spin_lock_irqsave(&dsi_mdp_lock, flag);
+	dsi_mdp_busy = false;
+	mdss_dsi_disable_irq();
+	spin_unlock_irqrestore(&dsi_mdp_lock, flag);
+
+	if (pdata->panel_info.mipi.no_max_pkt_size) {
+		/*
+		 * Remove the extra 2 bytes from the previous
+		 * rx transaction in the shift register, which
+		 * were inserted while copying the shift
+		 * registers to the rx buffer, so that the rx
+		 * payload starts at a long-aligned address.
+		 */
+		rp->data += 2;
+	}
+
+	cmd = rp->data[0];
+	switch (cmd) {
+	case DTYPE_ACK_ERR_RESP:
+		pr_debug("%s: rx ACK_ERR_PACKAGE\n", __func__);
+		break;
+	case DTYPE_GEN_READ1_RESP:
+	case DTYPE_DCS_READ1_RESP:
+		mdss_dsi_short_read1_resp(rp);
+		break;
+	case DTYPE_GEN_READ2_RESP:
+	case DTYPE_DCS_READ2_RESP:
+		mdss_dsi_short_read2_resp(rp);
+		break;
+	case DTYPE_GEN_LREAD_RESP:
+	case DTYPE_DCS_LREAD_RESP:
+		mdss_dsi_long_read_resp(rp);
+		rp->len -= 2; /* extra 2 bytes added */
+		rp->len -= diff; /* align bytes */
+		break;
+	default:
+		break;
+	}
+
+	return rp->len;
+}
+
+int mdss_dsi_cmd_dma_tx(struct dsi_buf *tp,
+			struct mdss_panel_data *pdata)
+{
+	int len;
+	int i;
+	char *bp;
+
+	bp = tp->data;
+
+	pr_debug("%s: ", __func__);
+	for (i = 0; i < tp->len; i++)
+		pr_debug("%x ", *bp++);
+
+	pr_debug("\n");
+
+	len = tp->len;
+	len += 3;
+	len &= ~0x03;	/* round up to a multiple of 4 */
+
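+	/*
+	 * Map the command buffer for DMA, program the DMA base address and
+	 * length, trigger the transfer, and block until the CMD_DMA_DONE
+	 * interrupt completes dsi_dma_comp.
+	 */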
+	tp->dmap = dma_map_single(&dsi_dev, tp->data, len, DMA_TO_DEVICE);
+	if (dma_mapping_error(&dsi_dev, tp->dmap))
+		pr_err("%s: dma map failed\n", __func__);
+
+	INIT_COMPLETION(dsi_dma_comp);
+
+	MIPI_OUTP((pdata->dsi_base) + 0x048, tp->dmap);
+	MIPI_OUTP((pdata->dsi_base) + 0x04c, len);
+	wmb();
+
+	MIPI_OUTP((pdata->dsi_base) + 0x090, 0x01);	/* trigger */
+	wmb();
+
+	wait_for_completion(&dsi_dma_comp);
+
+	dma_unmap_single(&dsi_dev, tp->dmap, len, DMA_TO_DEVICE);
+	tp->dmap = 0;
+	return tp->len;
+}
+
+int mdss_dsi_cmd_dma_rx(struct dsi_buf *rp, int rlen,
+			struct mdss_panel_data *pdata)
+{
+	u32 *lp, data;
+	int i, off, cnt;
+
+	lp = (u32 *)rp->data;
+	cnt = rlen;
+	cnt += 3;
+	cnt >>= 2;
+
+	if (cnt > 4)
+		cnt = 4; /* 4 x 32 bits registers only */
+
+	off = 0x06c;	/* DSI_RDBK_DATA0 */
+	off += ((cnt - 1) * 4);
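+	/*
+	 * Read back starting from the highest populated RDBK_DATA register
+	 * and walk the offset down to RDBK_DATA0, converting each word
+	 * from network to host byte order as it is copied out.
+	 */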
+
+
+	for (i = 0; i < cnt; i++) {
+		data = (u32)MIPI_INP((pdata->dsi_base) + off);
+		*lp++ = ntohl(data);	/* to host byte order */
+		off -= 4;
+		rp->len += sizeof(*lp);
+	}
+
+	return rlen;
+}
+
+void mdss_dsi_ack_err_status(unsigned char *dsi_base)
+{
+	u32 status;
+
+	status = MIPI_INP(dsi_base + 0x0068);/* DSI_ACK_ERR_STATUS */
+
+	if (status) {
+		MIPI_OUTP(dsi_base + 0x0068, status);
+		pr_debug("%s: status=%x\n", __func__, status);
+	}
+}
+
+void mdss_dsi_timeout_status(unsigned char *dsi_base)
+{
+	u32 status;
+
+	status = MIPI_INP(dsi_base + 0x00c0);/* DSI_TIMEOUT_STATUS */
+	if (status & 0x0111) {
+		MIPI_OUTP(dsi_base + 0x00c0, status);
+		pr_debug("%s: status=%x\n", __func__, status);
+	}
+}
+
+void mdss_dsi_dln0_phy_err(unsigned char *dsi_base)
+{
+	u32 status;
+
+	status = MIPI_INP(dsi_base + 0x00b4);/* DSI_DLN0_PHY_ERR */
+
+	if (status & 0x011111) {
+		MIPI_OUTP(dsi_base + 0x00b4, status);
+		pr_debug("%s: status=%x\n", __func__, status);
+	}
+}
+
+void mdss_dsi_fifo_status(unsigned char *dsi_base)
+{
+	u32 status;
+
+	status = MIPI_INP(dsi_base + 0x000c);/* DSI_FIFO_STATUS */
+
+	if (status & 0x44444489) {
+		MIPI_OUTP(dsi_base + 0x000c, status);
+		pr_debug("%s: status=%x\n", __func__, status);
+	}
+}
+
+void mdss_dsi_status(unsigned char *dsi_base)
+{
+	u32 status;
+
+	status = MIPI_INP(dsi_base + 0x0008);/* DSI_STATUS */
+
+	if (status & 0x80000000) {
+		MIPI_OUTP(dsi_base + 0x0008, status);
+		pr_debug("%s: status=%x\n", __func__, status);
+	}
+}
+
+void mdss_dsi_error(unsigned char *dsi_base)
+{
+	/* DSI_ERR_INT_MASK0 */
+	mdss_dsi_ack_err_status(dsi_base);	/* mask0, 0x01f */
+	mdss_dsi_timeout_status(dsi_base);	/* mask0, 0x0e0 */
+	mdss_dsi_fifo_status(dsi_base);		/* mask0, 0x133d00 */
+	mdss_dsi_status(dsi_base);		/* mask0, 0xc0100 */
+	mdss_dsi_dln0_phy_err(dsi_base);	/* mask0, 0x3e00000 */
+}
+
+
+irqreturn_t mdss_dsi_isr(int irq, void *ptr)
+{
+	u32 isr;
+	unsigned char *dsi_base;
+
+	dsi_base = mdss_dsi_get_base_adr();
+	if (!dsi_base) {
+		pr_err("%s:%d DSI base addr not initialized\n",
+				__func__, __LINE__);
+		return IRQ_HANDLED;
+	}
+
+	isr = MIPI_INP(dsi_base + 0x0110);/* DSI_INTR_CTRL */
+	MIPI_OUTP(dsi_base + 0x0110, isr);
+
+	if (isr & DSI_INTR_ERROR)
+		mdss_dsi_error(dsi_base);
+
+	if (isr & DSI_INTR_VIDEO_DONE) {
+		/*
+		 * do something here
+		 */
+	}
+
+	if (isr & DSI_INTR_CMD_DMA_DONE)
+		complete(&dsi_dma_comp);
+
+	if (isr & DSI_INTR_CMD_MDP_DONE) {
+		spin_lock(&dsi_mdp_lock);
+		dsi_mdp_busy = false;
+		mdss_dsi_disable_irq_nosync();
+		spin_unlock(&dsi_mdp_lock);
+	}
+
+	return IRQ_HANDLED;
+}
diff --git a/drivers/video/msm/mdss/mdss_dsi_panel.c b/drivers/video/msm/mdss/mdss_dsi_panel.c
new file mode 100644
index 0000000..bfb7fae
--- /dev/null
+++ b/drivers/video/msm/mdss/mdss_dsi_panel.c
@@ -0,0 +1,358 @@
+/* Copyright (c) 2012, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/of.h>
+#include <linux/slab.h>
+
+#include "mdss_dsi.h"
+
+#define DT_CMD_HDR 6
+
+static struct dsi_buf dsi_panel_tx_buf;
+static struct dsi_buf dsi_panel_rx_buf;
+
+static struct dsi_cmd_desc *dsi_panel_on_cmds;
+static struct dsi_cmd_desc *dsi_panel_off_cmds;
+static int num_of_on_cmds;
+static int num_of_off_cmds;
+static char *on_cmds, *off_cmds;
+
+static int mdss_dsi_panel_on(struct mdss_panel_data *pdata)
+{
+	struct mipi_panel_info *mipi;
+
+	mipi  = &pdata->panel_info.mipi;
+
+	pr_debug("%s:%d, debug info (mode) : %d\n", __func__, __LINE__,
+		 mipi->mode);
+
+	if (mipi->mode == DSI_VIDEO_MODE) {
+		mdss_dsi_cmds_tx(pdata, &dsi_panel_tx_buf, dsi_panel_on_cmds,
+			num_of_on_cmds);
+	} else {
+		pr_err("%s:%d, CMD MODE NOT SUPPORTED", __func__, __LINE__);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int mdss_dsi_panel_off(struct mdss_panel_data *pdata)
+{
+	struct mipi_panel_info *mipi;
+
+	mipi  = &pdata->panel_info.mipi;
+
+	pr_debug("%s:%d, debug info\n", __func__, __LINE__);
+
+	if (mipi->mode == DSI_VIDEO_MODE) {
+		mdss_dsi_cmds_tx(pdata, &dsi_panel_tx_buf, dsi_panel_off_cmds,
+			num_of_off_cmds);
+	} else {
+		pr_debug("%s:%d, CMD mode not supported", __func__, __LINE__);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
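+/*
+ * Illustrative devicetree fragment for the properties parsed below
+ * (placeholder values only, not taken from any real panel):
+ *
+ *	qcom,mdss-pan-res = <1280 720>;
+ *	qcom,mdss-pan-bpp = <24>;
+ *	qcom,mdss-pan-porch-values = <32 12 32 7 1 7>;
+ *	qcom,panel-on-cmds = [05 01 00 00 78 02 11 00];
+ */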
+static int mdss_panel_parse_dt(struct platform_device *pdev,
+			    struct mdss_panel_common_pdata *panel_data)
+{
+	struct device_node *np = pdev->dev.of_node;
+	u32 res[6], tmp;
+	int rc, i, len;
+	int cmd_plen, data_offset;
+	const char *data;
+
+	rc = of_property_read_u32_array(np, "qcom,mdss-pan-res", res, 2);
+	if (rc) {
+		pr_err("%s:%d, panel resolution not specified\n",
+						__func__, __LINE__);
+		return -EINVAL;
+	}
+	panel_data->panel_info.xres = (!rc ? res[0] : 640);
+	panel_data->panel_info.yres = (!rc ? res[1] : 480);
+
+	rc = of_property_read_u32(np, "qcom,mdss-pan-bpp", &tmp);
+	if (rc) {
+		pr_err("%s:%d, panel bpp not specified\n",
+						__func__, __LINE__);
+		return -EINVAL;
+	}
+	panel_data->panel_info.bpp = (!rc ? tmp : 24);
+
+	rc = of_property_read_u32_array(np,
+		"qcom,mdss-pan-porch-values", res, 6);
+	panel_data->panel_info.lcdc.h_back_porch = (!rc ? res[0] : 6);
+	panel_data->panel_info.lcdc.h_pulse_width = (!rc ? res[1] : 2);
+	panel_data->panel_info.lcdc.h_front_porch = (!rc ? res[2] : 6);
+	panel_data->panel_info.lcdc.v_back_porch = (!rc ? res[3] : 6);
+	panel_data->panel_info.lcdc.v_pulse_width = (!rc ? res[4] : 2);
+	panel_data->panel_info.lcdc.v_front_porch = (!rc ? res[5] : 6);
+
+	rc = of_property_read_u32(np,
+		"qcom,mdss-pan-underflow-clr", &tmp);
+	panel_data->panel_info.lcdc.underflow_clr = (!rc ? tmp : 0xff);
+
+	rc = of_property_read_u32_array(np,
+		"qcom,mdss-pan-bl-levels", res, 2);
+	panel_data->panel_info.bl_min = (!rc ? res[0] : 0);
+	panel_data->panel_info.bl_max = (!rc ? res[1] : 255);
+
+	rc = of_property_read_u32(np, "qcom,mdss-pan-dsi-mode", &tmp);
+	panel_data->panel_info.mipi.mode = (!rc ? tmp : DSI_VIDEO_MODE);
+
+	rc = of_property_read_u32(np,
+		"qcom,mdss-pan-dsi-h-pulse-mode", &tmp);
+	panel_data->panel_info.mipi.pulse_mode_hsa_he = (!rc ? tmp : false);
+
+	rc = of_property_read_u32_array(np,
+		"qcom,mdss-pan-dsi-h-power-stop", res, 3);
+	panel_data->panel_info.mipi.hbp_power_stop = (!rc ? res[0] : false);
+	panel_data->panel_info.mipi.hsa_power_stop = (!rc ? res[1] : false);
+	panel_data->panel_info.mipi.hfp_power_stop = (!rc ? res[2] : false);
+
+	rc = of_property_read_u32_array(np,
+		"qcom,mdss-pan-dsi-bllp-power-stop", res, 2);
+	panel_data->panel_info.mipi.bllp_power_stop =
+					(!rc ? res[0] : false);
+	panel_data->panel_info.mipi.eof_bllp_power_stop =
+					(!rc ? res[1] : false);
+
+	rc = of_property_read_u32(np,
+		"qcom,mdss-pan-dsi-traffic-mode", &tmp);
+	panel_data->panel_info.mipi.traffic_mode =
+			(!rc ? tmp : DSI_NON_BURST_SYNCH_PULSE);
+
+	rc = of_property_read_u32(np,
+		"qcom,mdss-pan-dsi-dst-format", &tmp);
+	panel_data->panel_info.mipi.dst_format =
+			(!rc ? tmp : DSI_VIDEO_DST_FORMAT_RGB888);
+
+	rc = of_property_read_u32(np, "qcom,mdss-pan-dsi-vc", &tmp);
+	panel_data->panel_info.mipi.vc = (!rc ? tmp : 0);
+
+	rc = of_property_read_u32(np, "qcom,mdss-pan-dsi-rgb-swap", &tmp);
+	panel_data->panel_info.mipi.rgb_swap = (!rc ? tmp : DSI_RGB_SWAP_RGB);
+
+	rc = of_property_read_u32_array(np,
+		"qcom,mdss-pan-dsi-data-lanes", res, 4);
+	panel_data->panel_info.mipi.data_lane0 = (!rc ? res[0] : true);
+	panel_data->panel_info.mipi.data_lane1 = (!rc ? res[1] : false);
+	panel_data->panel_info.mipi.data_lane2 = (!rc ? res[2] : false);
+	panel_data->panel_info.mipi.data_lane3 = (!rc ? res[3] : false);
+
+	rc = of_property_read_u32_array(np, "qcom,mdss-pan-dsi-t-clk", res, 2);
+	panel_data->panel_info.mipi.t_clk_pre = (!rc ? res[0] : 0x24);
+	panel_data->panel_info.mipi.t_clk_post = (!rc ? res[1] : 0x03);
+
+	rc = of_property_read_u32(np, "qcom,mdss-pan-dsi-stream", &tmp);
+	panel_data->panel_info.mipi.stream = (!rc ? tmp : 0);
+
+	rc = of_property_read_u32(np, "qcom,mdss-pan-dsi-mdp-tr", &tmp);
+	panel_data->panel_info.mipi.mdp_trigger =
+			(!rc ? tmp : DSI_CMD_TRIGGER_SW);
+	if (panel_data->panel_info.mipi.mdp_trigger > 6) {
+		pr_err("%s:%d, Invalid mdp trigger. Forcing to sw trigger",
+						 __func__, __LINE__);
+		panel_data->panel_info.mipi.mdp_trigger =
+					DSI_CMD_TRIGGER_SW;
+	}
+
+	rc = of_property_read_u32(np, "qcom,mdss-pan-dsi-dma-tr", &tmp);
+	panel_data->panel_info.mipi.dma_trigger =
+			(!rc ? tmp : DSI_CMD_TRIGGER_SW);
+	if (panel_data->panel_info.mipi.dma_trigger > 6) {
+		pr_err("%s:%d, Invalid dma trigger. Forcing to sw trigger",
+						 __func__, __LINE__);
+		panel_data->panel_info.mipi.dma_trigger =
+					DSI_CMD_TRIGGER_SW;
+	}
+
+	rc = of_property_read_u32(np, "qcom,mdss-pan-dsi-frame-rate", &tmp);
+	panel_data->panel_info.mipi.frame_rate = (!rc ? tmp : 60);
+
+	data = of_get_property(np, "qcom,panel-on-cmds", &len);
+	if (!data) {
+		pr_err("%s:%d, Unable to read ON cmds", __func__, __LINE__);
+		goto error;
+	}
+
+	on_cmds = kzalloc(sizeof(char) * len, GFP_KERNEL);
+	if (!on_cmds)
+		return -ENOMEM;
+
+	memcpy(on_cmds, data, len);
+
+	data_offset = 0;
+	cmd_plen = 0;
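+	/*
+	 * Each command in the blob starts with a DT_CMD_HDR (6 byte)
+	 * header (dtype, last, vc, ack, wait, dlen) followed by dlen
+	 * payload bytes.  This first pass only counts the commands;
+	 * the second pass below fills in dsi_panel_on_cmds.
+	 */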
+	while ((len - data_offset) >= DT_CMD_HDR) {
+		data_offset += (DT_CMD_HDR - 1);
+		cmd_plen = on_cmds[data_offset++];
+		data_offset += cmd_plen;
+		num_of_on_cmds++;
+	}
+	if (!num_of_on_cmds) {
+		pr_err("%s:%d, No ON cmds specified", __func__, __LINE__);
+		goto error;
+	}
+
+	dsi_panel_on_cmds =
+		kzalloc((num_of_on_cmds * sizeof(struct dsi_cmd_desc)),
+						GFP_KERNEL);
+	if (!dsi_panel_on_cmds)
+		return -ENOMEM;
+
+	data_offset = 0;
+	for (i = 0; i < num_of_on_cmds; i++) {
+		dsi_panel_on_cmds[i].dtype = on_cmds[data_offset++];
+		dsi_panel_on_cmds[i].last = on_cmds[data_offset++];
+		dsi_panel_on_cmds[i].vc = on_cmds[data_offset++];
+		dsi_panel_on_cmds[i].ack = on_cmds[data_offset++];
+		dsi_panel_on_cmds[i].wait = on_cmds[data_offset++];
+		dsi_panel_on_cmds[i].dlen = on_cmds[data_offset++];
+		dsi_panel_on_cmds[i].payload = &on_cmds[data_offset];
+		data_offset += (dsi_panel_on_cmds[i].dlen);
+	}
+
+	if (data_offset != len) {
+		pr_err("%s:%d, Incorrect ON command entries",
+						__func__, __LINE__);
+		goto error;
+	}
+
+	data = of_get_property(np, "qcom,panel-off-cmds", &len);
+	if (!data) {
+		pr_err("%s:%d, Unable to read OFF cmds", __func__, __LINE__);
+		goto error;
+	}
+
+	off_cmds = kzalloc(sizeof(char) * len, GFP_KERNEL);
+	if (!off_cmds)
+		return -ENOMEM;
+
+	memcpy(off_cmds, data, len);
+
+	data_offset = 0;
+	cmd_plen = 0;
+	while ((len - data_offset) >= DT_CMD_HDR) {
+		data_offset += (DT_CMD_HDR - 1);
+		cmd_plen = off_cmds[data_offset++];
+		data_offset += cmd_plen;
+		num_of_off_cmds++;
+	}
+	if (!num_of_off_cmds) {
+		pr_err("%s:%d, No OFF cmds specified", __func__, __LINE__);
+		goto error;
+	}
+
+	dsi_panel_off_cmds = kzalloc(num_of_off_cmds
+				* sizeof(struct dsi_cmd_desc),
+					GFP_KERNEL);
+	if (!dsi_panel_off_cmds)
+		return -ENOMEM;
+
+	data_offset = 0;
+	for (i = 0; i < num_of_off_cmds; i++) {
+		dsi_panel_off_cmds[i].dtype = off_cmds[data_offset++];
+		dsi_panel_off_cmds[i].last = off_cmds[data_offset++];
+		dsi_panel_off_cmds[i].vc = off_cmds[data_offset++];
+		dsi_panel_off_cmds[i].ack = off_cmds[data_offset++];
+		dsi_panel_off_cmds[i].wait = off_cmds[data_offset++];
+		dsi_panel_off_cmds[i].dlen = off_cmds[data_offset++];
+		dsi_panel_off_cmds[i].payload = &off_cmds[data_offset];
+		data_offset += (dsi_panel_off_cmds[i].dlen);
+	}
+
+	if (data_offset != len) {
+		pr_err("%s:%d, Incorrect OFF command entries",
+						__func__, __LINE__);
+		goto error;
+	}
+
+	return 0;
+error:
+	kfree(dsi_panel_on_cmds);
+	kfree(dsi_panel_off_cmds);
+	kfree(on_cmds);
+	kfree(off_cmds);
+
+	return -EINVAL;
+}
+
+static int __devinit mdss_dsi_panel_probe(struct platform_device *pdev)
+{
+	int rc = 0;
+	struct mdss_panel_common_pdata *vendor_pdata = NULL;
+	static const char *panel_name;
+
+	if (pdev->dev.parent == NULL) {
+		pr_err("%s: parent device missing\n", __func__);
+		return -ENODEV;
+	}
+
+	pr_debug("%s:%d, debug info id=%d", __func__, __LINE__, pdev->id);
+	if (!pdev->dev.of_node)
+		return -ENODEV;
+
+	panel_name = of_get_property(pdev->dev.of_node, "label", NULL);
+	if (!panel_name)
+		pr_info("%s:%d, panel name not specified\n",
+						__func__, __LINE__);
+	else
+		pr_info("%s: Panel Name = %s\n", __func__, panel_name);
+
+	vendor_pdata = devm_kzalloc(&pdev->dev,
+			sizeof(*vendor_pdata), GFP_KERNEL);
+	if (!vendor_pdata)
+		return -ENOMEM;
+
+	rc = mdss_panel_parse_dt(pdev, vendor_pdata);
+	if (rc) {
+		devm_kfree(&pdev->dev, vendor_pdata);
+		vendor_pdata = NULL;
+		return rc;
+	}
+	vendor_pdata->on = mdss_dsi_panel_on;
+	vendor_pdata->off = mdss_dsi_panel_off;
+
+	rc = dsi_panel_device_register(pdev, vendor_pdata);
+	if (rc)
+		return rc;
+
+	return 0;
+}
+
+static const struct of_device_id mdss_dsi_panel_match[] = {
+	{.compatible = "qcom,mdss-dsi-panel"},
+	{}
+};
+
+static struct platform_driver this_driver = {
+	.probe  = mdss_dsi_panel_probe,
+	.driver = {
+		.name   = "dsi_panel",
+		.of_match_table = mdss_dsi_panel_match,
+	},
+};
+
+static int __init mdss_dsi_panel_init(void)
+{
+	mdss_dsi_buf_alloc(&dsi_panel_tx_buf, DSI_BUF_SIZE);
+	mdss_dsi_buf_alloc(&dsi_panel_rx_buf, DSI_BUF_SIZE);
+
+	return platform_driver_register(&this_driver);
+}
+module_init(mdss_dsi_panel_init);
diff --git a/drivers/video/msm/mdss/mdss_mdp_hwio.h b/drivers/video/msm/mdss/mdss_mdp_hwio.h
index 4ca1dce..8825cc6 100644
--- a/drivers/video/msm/mdss/mdss_mdp_hwio.h
+++ b/drivers/video/msm/mdss/mdss_mdp_hwio.h
@@ -375,6 +375,7 @@
 #define MDSS_MDP_REG_INTF_FRAME_LINE_COUNT_EN		0x0A8
 #define MDSS_MDP_REG_INTF_FRAME_COUNT			0x0AC
 #define MDSS_MDP_REG_INTF_LINE_COUNT			0x0B0
+#define MDSS_MDP_PANEL_FORMAT_RGB888			0x213F
 
 enum mdss_mdp_pingpong_index {
 	MDSS_MDP_PINGPONG0,
diff --git a/drivers/video/msm/mdss/mdss_mdp_intf_video.c b/drivers/video/msm/mdss/mdss_mdp_intf_video.c
index 21ef290..2f0a1f5 100644
--- a/drivers/video/msm/mdss/mdss_mdp_intf_video.c
+++ b/drivers/video/msm/mdss/mdss_mdp_intf_video.c
@@ -135,6 +135,8 @@
 			   p->hsync_skew);
 	MDSS_MDP_REG_WRITE(off + MDSS_MDP_REG_INTF_POLARITY_CTL,
 			   polarity_ctl);
+	MDSS_MDP_REG_WRITE(off + MDSS_MDP_REG_INTF_PANEL_FORMAT,
+			   MDSS_MDP_PANEL_FORMAT_RGB888);
 
 	return 0;
 }
@@ -297,14 +299,14 @@
 	itp.underflow_clr = pinfo->lcdc.underflow_clr;
 	itp.hsync_skew = pinfo->lcdc.hsync_skew;
 
-	itp.xres = fbi->var.xres;
-	itp.yres = fbi->var.yres;
-	itp.h_back_porch = fbi->var.left_margin;
-	itp.h_front_porch = fbi->var.right_margin;
-	itp.v_back_porch = fbi->var.upper_margin;
-	itp.v_front_porch = fbi->var.lower_margin;
-	itp.hsync_pulse_width = fbi->var.hsync_len;
-	itp.vsync_pulse_width = fbi->var.vsync_len;
+	itp.xres = pinfo->xres;
+	itp.yres = pinfo->yres;
+	itp.h_back_porch = pinfo->lcdc.h_back_porch;
+	itp.h_front_porch = pinfo->lcdc.h_front_porch;
+	itp.v_back_porch = pinfo->lcdc.v_back_porch;
+	itp.v_front_porch = pinfo->lcdc.v_front_porch;
+	itp.hsync_pulse_width = pinfo->lcdc.h_pulse_width;
+	itp.vsync_pulse_width = pinfo->lcdc.v_pulse_width;
 
 	if (mdss_mdp_video_timegen_setup(ctl, &itp)) {
 		pr_err("unable to get timing parameters\n");
diff --git a/drivers/video/msm/mdss/mdss_mdp_wb.c b/drivers/video/msm/mdss/mdss_mdp_wb.c
index 046f666..26e459f 100644
--- a/drivers/video/msm/mdss/mdss_mdp_wb.c
+++ b/drivers/video/msm/mdss/mdss_mdp_wb.c
@@ -235,6 +235,7 @@
 	}
 
 	node->buf_data.num_planes = 1;
+	node->buf_info = *data;
 	buf = &node->buf_data.p[0];
 	buf->addr = (u32) (data->iova + data->offset);
 	buf->len = UINT_MAX; /* trusted source */
diff --git a/drivers/video/msm/mdss/mdss_panel.h b/drivers/video/msm/mdss/mdss_panel.h
index 0411d8e..3ec3a5d 100644
--- a/drivers/video/msm/mdss/mdss_panel.h
+++ b/drivers/video/msm/mdss/mdss_panel.h
@@ -167,6 +167,7 @@
 struct mdss_panel_data {
 	struct mdss_panel_info panel_info;
 	void (*set_backlight) (u32 bl_level);
+	unsigned char *dsi_base;
 
 	/* function entry chain */
 	int (*on) (struct mdss_panel_data *pdata);
diff --git a/drivers/video/msm/mdss/mdss_wb.c b/drivers/video/msm/mdss/mdss_wb.c
index 3be4525..a26d339 100644
--- a/drivers/video/msm/mdss/mdss_wb.c
+++ b/drivers/video/msm/mdss/mdss_wb.c
@@ -73,7 +73,7 @@
 	pdata->panel_info.type = WRITEBACK_PANEL;
 	pdata->panel_info.clk_rate = 74250000;
 	pdata->panel_info.pdest = DISPLAY_3;
-	pdata->panel_info.out_format = MDP_RGB_888;
+	pdata->panel_info.out_format = MDP_Y_CBCR_H2V2;
 
 	pdata->on = mdss_wb_on;
 	pdata->off = mdss_wb_off;
diff --git a/drivers/video/msm/mdss/msm_mdss_io_8974.c b/drivers/video/msm/mdss/msm_mdss_io_8974.c
new file mode 100644
index 0000000..c766ec7
--- /dev/null
+++ b/drivers/video/msm/mdss/msm_mdss_io_8974.c
@@ -0,0 +1,199 @@
+/* Copyright (c) 2012, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+#include <linux/clk.h>
+#include <linux/interrupt.h>
+#include <linux/err.h>
+#include <linux/io.h>
+
+#include <mach/clk.h>
+#include <mach/msm_iomap.h>
+
+#include "mdss_dsi.h"
+
+#define SW_RESET BIT(2)
+#define SW_RESET_PLL BIT(0)
+#define PWRDN_B BIT(7)
+
+static struct dsi_clk_desc dsi_pclk;
+
+static struct clk *dsi_byte_div_clk;
+static struct clk *dsi_esc_clk;
+
+int mdss_dsi_clk_on;
+
+int mdss_dsi_clk_init(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+
+	dsi_byte_div_clk = clk_get(dev, "byte_clk");
+	if (IS_ERR(dsi_byte_div_clk)) {
+		pr_err("can't find dsi_byte_div_clk\n");
+		dsi_byte_div_clk = NULL;
+		goto mdss_dsi_clk_err;
+	}
+
+	dsi_esc_clk = clk_get(dev, "core_clk");
+	if (IS_ERR(dsi_esc_clk)) {
+		pr_err("can't find dsi_esc_clk\n");
+		dsi_esc_clk = NULL;
+		goto mdss_dsi_clk_err;
+	}
+
+	return 0;
+
+mdss_dsi_clk_err:
+	mdss_dsi_clk_deinit(dev);
+	return -EPERM;
+}
+
+void mdss_dsi_clk_deinit(struct device *dev)
+{
+	if (dsi_byte_div_clk)
+		clk_put(dsi_byte_div_clk);
+	if (dsi_esc_clk)
+		clk_put(dsi_esc_clk);
+}
+
+#define PREF_DIV_RATIO 27
+struct dsiphy_pll_divider_config pll_divider_config;
+
+int mdss_dsi_clk_div_config(u8 bpp, u8 lanes,
+			    u32 *expected_dsi_pclk)
+{
+	u32 fb_divider, rate, vco;
+	u32 div_ratio = 0;
+	u32 pll_analog_posDiv = 1;
+	struct dsi_clk_mnd_table const *mnd_entry = mnd_table;
+	if (pll_divider_config.clk_rate == 0)
+		pll_divider_config.clk_rate = 454000000;
+
+	rate = (pll_divider_config.clk_rate / 2)
+			 / 1000000; /* Half Bit Clock In Mhz */
+
+	if (rate < 43) {
+		vco = rate * 16;
+		div_ratio = 16;
+		pll_analog_posDiv = 8;
+	} else if (rate < 85) {
+		vco = rate * 8;
+		div_ratio = 8;
+		pll_analog_posDiv = 4;
+	} else if (rate < 170) {
+		vco = rate * 4;
+		div_ratio = 4;
+		pll_analog_posDiv = 2;
+	} else if (rate < 340) {
+		vco = rate * 2;
+		div_ratio = 2;
+		pll_analog_posDiv = 1;
+	} else {
+		/* DSI PLL Direct path configuration */
+		vco = rate * 1;
+		div_ratio = 1;
+		pll_analog_posDiv = 1;
+	}
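+	/*
+	 * Worked example (illustration only): with the default clk_rate of
+	 * 454 MHz the half bit clock is 227 MHz, which falls in the
+	 * 170 to 340 band above, so vco = 454, div_ratio = 2 and
+	 * pll_analog_posDiv = 1.
+	 */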
+
+	/* find the mnd settings from mnd_table entry */
+	for (; mnd_entry != mnd_table + ARRAY_SIZE(mnd_table); ++mnd_entry) {
+		if (((mnd_entry->lanes) == lanes) &&
+			((mnd_entry->bpp) == bpp))
+			break;
+	}
+
+	if (mnd_entry == mnd_table + ARRAY_SIZE(mnd_table)) {
+		pr_err("%s: requested Lanes, %u & BPP, %u, not supported\n",
+			__func__, lanes, bpp);
+		return -EINVAL;
+	}
+	fb_divider = ((vco * PREF_DIV_RATIO) / 27);
+	pll_divider_config.fb_divider = fb_divider;
+	pll_divider_config.ref_divider_ratio = PREF_DIV_RATIO;
+	pll_divider_config.bit_clk_divider = div_ratio;
+	pll_divider_config.byte_clk_divider =
+			pll_divider_config.bit_clk_divider * 8;
+	pll_divider_config.analog_posDiv = pll_analog_posDiv;
+	pll_divider_config.digital_posDiv =
+			(mnd_entry->pll_digital_posDiv) * div_ratio;
+
+	if ((mnd_entry->pclk_d == 0)
+		|| (mnd_entry->pclk_m == 1)) {
+		dsi_pclk.mnd_mode = 0;
+		dsi_pclk.src = 0x3;
+		dsi_pclk.pre_div_func = (mnd_entry->pclk_n - 1);
+	} else {
+		dsi_pclk.mnd_mode = 2;
+		dsi_pclk.src = 0x3;
+		dsi_pclk.m = mnd_entry->pclk_m;
+		dsi_pclk.n = mnd_entry->pclk_n;
+		dsi_pclk.d = mnd_entry->pclk_d;
+	}
+	*expected_dsi_pclk = (((pll_divider_config.clk_rate) * lanes)
+				      / (8 * bpp));
+
+	return 0;
+}
+
+void cont_splash_clk_ctrl(int enable)
+{
+	static int cont_splash_clks_enabled;
+	if (enable && !cont_splash_clks_enabled) {
+		clk_prepare_enable(dsi_byte_div_clk);
+		clk_prepare_enable(dsi_esc_clk);
+		cont_splash_clks_enabled = 1;
+	} else if (!enable && cont_splash_clks_enabled) {
+		clk_disable_unprepare(dsi_byte_div_clk);
+		clk_disable_unprepare(dsi_esc_clk);
+		cont_splash_clks_enabled = 0;
+	}
+}
+
+void mdss_dsi_prepare_clocks(void)
+{
+	clk_prepare(dsi_byte_div_clk);
+	clk_prepare(dsi_esc_clk);
+}
+
+void mdss_dsi_unprepare_clocks(void)
+{
+	clk_unprepare(dsi_esc_clk);
+	clk_unprepare(dsi_byte_div_clk);
+}
+
+void mdss_dsi_clk_enable(void)
+{
+	if (mdss_dsi_clk_on) {
+		pr_info("%s: mdss_dsi_clks already ON\n", __func__);
+		return;
+	}
+
+	if (clk_set_rate(dsi_byte_div_clk, 1) < 0)	/* divided by 1 */
+		pr_err("%s: dsi_byte_div_clk - clk_set_rate failed\n",
+					__func__);
+	if (clk_set_rate(dsi_esc_clk, 2) < 0) /* divided by 2 */
+		pr_err("%s: dsi_esc_clk - clk_set_rate failed\n",
+					__func__);
+	clk_enable(dsi_byte_div_clk);
+	clk_enable(dsi_esc_clk);
+	mdss_dsi_clk_on = 1;
+}
+
+void mdss_dsi_clk_disable(void)
+{
+	if (mdss_dsi_clk_on == 0) {
+		pr_info("%s: mdss_dsi_clks already OFF\n", __func__);
+		return;
+	}
+	clk_disable(dsi_esc_clk);
+	clk_disable(dsi_byte_div_clk);
+	mdss_dsi_clk_on = 0;
+}
diff --git a/drivers/video/msm/mipi_NT35510.c b/drivers/video/msm/mipi_NT35510.c
index 04178fa..94c24ee 100644
--- a/drivers/video/msm/mipi_NT35510.c
+++ b/drivers/video/msm/mipi_NT35510.c
@@ -482,6 +482,11 @@
 
 	mipi  = &mfd->panel_info.mipi;
 
+	if (!mfd->cont_splash_done) {
+		mfd->cont_splash_done = 1;
+		return 0;
+	}
+
 	if (mipi_nt35510_pdata && mipi_nt35510_pdata->rotate_panel)
 		rotate = mipi_nt35510_pdata->rotate_panel();
 
diff --git a/drivers/video/msm/msm_dss_io_7x27a.c b/drivers/video/msm/msm_dss_io_7x27a.c
index 17ee976..18e8ac5 100644
--- a/drivers/video/msm/msm_dss_io_7x27a.c
+++ b/drivers/video/msm/msm_dss_io_7x27a.c
@@ -317,6 +317,24 @@
 
 void cont_splash_clk_ctrl(int enable)
 {
+	static int cont_splash_clks_enabled;
+	if (enable && !cont_splash_clks_enabled) {
+		clk_prepare_enable(dsi_ref_clk);
+		clk_prepare_enable(mdp_dsi_pclk);
+		clk_prepare_enable(dsi_byte_div_clk);
+		clk_prepare_enable(dsi_esc_clk);
+		clk_prepare_enable(dsi_pixel_clk);
+		clk_prepare_enable(dsi_clk);
+		cont_splash_clks_enabled = 1;
+	} else if (!enable && cont_splash_clks_enabled) {
+		clk_disable_unprepare(dsi_clk);
+		clk_disable_unprepare(dsi_pixel_clk);
+		clk_disable_unprepare(dsi_esc_clk);
+		clk_disable_unprepare(dsi_byte_div_clk);
+		clk_disable_unprepare(mdp_dsi_pclk);
+		clk_disable_unprepare(dsi_ref_clk);
+		cont_splash_clks_enabled = 0;
+	}
 }
 
 void mipi_dsi_prepare_clocks(void)
diff --git a/drivers/video/msm/msm_fb.c b/drivers/video/msm/msm_fb.c
index 72e3600..18ec3f1 100644
--- a/drivers/video/msm/msm_fb.c
+++ b/drivers/video/msm/msm_fb.c
@@ -377,6 +377,7 @@
 	if (pdev_list_cnt >= MSM_FB_MAX_DEV_LIST)
 		return -ENOMEM;
 
+	vsync_cntrl.dev = mfd->fbi->dev;
 	mfd->panel_info.frame_count = 0;
 	mfd->bl_level = 0;
 	bl_scale = 1024;
@@ -1438,10 +1439,22 @@
 
 	/* cursor memory allocation */
 	if (mfd->cursor_update) {
+		unsigned long cursor_buf_iommu = 0;
 		mfd->cursor_buf = dma_alloc_coherent(NULL,
 					MDP_CURSOR_SIZE,
 					(dma_addr_t *) &mfd->cursor_buf_phys,
 					GFP_KERNEL);
+
+		msm_iommu_map_contig_buffer((unsigned long)mfd->cursor_buf_phys,
+					    DISPLAY_READ_DOMAIN,
+					    GEN_POOL,
+					    MDP_CURSOR_SIZE,
+					    SZ_4K,
+					    0,
+					    &cursor_buf_iommu);
+		if (cursor_buf_iommu)
+			mfd->cursor_buf_phys = (void *)cursor_buf_iommu;
+
 		if (!mfd->cursor_buf)
 			mfd->cursor_update = 0;
 	}
@@ -1483,7 +1496,10 @@
 	ret = 0;
 
 #ifdef CONFIG_HAS_EARLYSUSPEND
-	if (hdmi_prim_display || mfd->panel_info.type != DTV_PANEL) {
+
+	if (hdmi_prim_display ||
+	    (mfd->panel_info.type != DTV_PANEL &&
+	     mfd->panel_info.type != WRITEBACK_PANEL)) {
 		mfd->early_suspend.suspend = msmfb_early_suspend;
 		mfd->early_suspend.resume = msmfb_early_resume;
 		mfd->early_suspend.level = EARLY_SUSPEND_LEVEL_DISABLE_FB - 2;
@@ -2804,6 +2820,27 @@
 	return 0;
 }
 
+static int msmfb_vsync_ctrl(struct fb_info *info, void __user *argp)
+{
+	int enable, ret;
+	struct msm_fb_data_type *mfd = (struct msm_fb_data_type *)info->par;
+
+	ret = copy_from_user(&enable, argp, sizeof(enable));
+	if (ret) {
+		pr_err("%s:msmfb_overlay_vsync ioctl failed", __func__);
+		return ret;
+	}
+
+	if (mfd->vsync_ctrl) {
+		mfd->vsync_ctrl(enable);
+	} else {
+		pr_err("%s: Vsync IOCTL not supported", __func__);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
 #ifdef CONFIG_FB_MSM_OVERLAY
 static int msmfb_overlay_get(struct fb_info *info, void __user *p)
 {
@@ -2866,25 +2903,6 @@
 	return mdp4_overlay_unset(info, ndx);
 }
 
-static int msmfb_overlay_wait4vsync(struct fb_info *info, void __user *argp)
-{
-	int ret;
-	long long vtime;
-
-	ret = mdp4_overlay_wait4vsync(info, &vtime);
-	if (ret) {
-		pr_err("%s: ioctl failed\n", __func__);
-		return ret;
-	}
-
-	if (copy_to_user(argp, &vtime, sizeof(vtime))) {
-		pr_err("%s: copy2user failed\n", __func__);
-		return -EFAULT;
-	}
-
-	return 0;
-}
-
 static int msmfb_overlay_vsync_ctrl(struct fb_info *info, void __user *argp)
 {
 	int ret;
@@ -3316,16 +3334,6 @@
 
 	switch (cmd) {
 #ifdef CONFIG_FB_MSM_OVERLAY
-	case FBIO_WAITFORVSYNC:
-		down(&msm_fb_ioctl_ppp_sem);
-		ret = msmfb_overlay_wait4vsync(info, argp);
-		up(&msm_fb_ioctl_ppp_sem);
-		break;
-	case MSMFB_OVERLAY_VSYNC_CTRL:
-		down(&msm_fb_ioctl_ppp_sem);
-		ret = msmfb_overlay_vsync_ctrl(info, argp);
-		up(&msm_fb_ioctl_ppp_sem);
-		break;
 	case MSMFB_OVERLAY_GET:
 		down(&msm_fb_ioctl_ppp_sem);
 		ret = msmfb_overlay_get(info, argp);
@@ -3394,6 +3402,15 @@
 		ret = msmfb_overlay_ioctl_writeback_terminate(info);
 		break;
 #endif
+	case MSMFB_VSYNC_CTRL:
+	case MSMFB_OVERLAY_VSYNC_CTRL:
+		down(&msm_fb_ioctl_ppp_sem);
+		if (mdp_rev >= MDP_REV_40)
+			ret = msmfb_overlay_vsync_ctrl(info, argp);
+		else
+			ret = msmfb_vsync_ctrl(info, argp);
+		up(&msm_fb_ioctl_ppp_sem);
+		break;
 	case MSMFB_BLIT:
 		down(&msm_fb_ioctl_ppp_sem);
 		ret = msmfb_blit(info, argp);
diff --git a/drivers/video/msm/msm_fb.h b/drivers/video/msm/msm_fb.h
index 0658365..efe6160 100644
--- a/drivers/video/msm/msm_fb.h
+++ b/drivers/video/msm/msm_fb.h
@@ -80,6 +80,7 @@
 	DISP_TARGET dest;
 	struct fb_info *fbi;
 
+	struct device *dev;
 	boolean op_enable;
 	uint32 fb_imgType;
 	boolean sw_currently_refreshing;
@@ -134,6 +135,7 @@
 			      struct mdp_histogram_data *hist);
 	int (*start_histogram) (struct mdp_histogram_start_req *req);
 	int (*stop_histogram) (struct fb_info *info, uint32_t block);
+	void (*vsync_ctrl) (int enable);
 	void *cursor_buf;
 	void *cursor_buf_phys;
 
diff --git a/drivers/video/msm/vidc/1080p/ddl/vcd_ddl_interrupt_handler.c b/drivers/video/msm/vidc/1080p/ddl/vcd_ddl_interrupt_handler.c
index 22eaf4f..72fe2e3 100644
--- a/drivers/video/msm/vidc/1080p/ddl/vcd_ddl_interrupt_handler.c
+++ b/drivers/video/msm/vidc/1080p/ddl/vcd_ddl_interrupt_handler.c
@@ -1302,6 +1302,7 @@
 		ddl_process_decoder_metadata(ddl);
 		vidc_sm_get_aspect_ratio_info(
 			&ddl->shared_mem[ddl->command_channel],
+			decoder->codec.codec,
 			&output_vcd_frm->aspect_ratio_info);
 		ddl_context->ddl_callback(VCD_EVT_RESP_OUTPUT_DONE,
 			vcd_status, output_frame,
diff --git a/drivers/video/msm/vidc/1080p/ddl/vcd_ddl_shared_mem.c b/drivers/video/msm/vidc/1080p/ddl/vcd_ddl_shared_mem.c
index 839a9c1..d45de2d 100644
--- a/drivers/video/msm/vidc/1080p/ddl/vcd_ddl_shared_mem.c
+++ b/drivers/video/msm/vidc/1080p/ddl/vcd_ddl_shared_mem.c
@@ -205,6 +205,10 @@
 #define VIDC_SM_ASPECT_RATIO_INFO_ADDR               0x00c8
 #define VIDC_SM_MPEG4_ASPECT_RATIO_INFO_BMSK         0xf
 #define VIDC_SM_MPEG4_ASPECT_RATIO_INFO_SHFT         0x0
+#define VIDC_SM_MPEG2_ASPECT_RATIO_INFO_BMSK         0x000f0000
+#define VIDC_SM_MPEG2_ASPECT_RATIO_INFO_SHFT         16
+#define VIDC_SM_H264_ASPECT_RATIO_INFO_BMSK          0x00000ff0
+#define VIDC_SM_H264_ASPECT_RATIO_INFO_SHFT          4
 #define VIDC_SM_EXTENDED_PAR_ADDR                    0x00cc
 #define VIDC_SM_EXTENDED_PAR_WIDTH_BMSK              0xffff0000
 #define VIDC_SM_EXTENDED_PAR_WIDTH_SHFT              16
@@ -802,23 +806,160 @@
 }
 
 void vidc_sm_get_aspect_ratio_info(struct ddl_buf_addr *shared_mem,
-	struct vcd_aspect_ratio *aspect_ratio_info)
+	enum vcd_codec codec, struct vcd_aspect_ratio *aspect_ratio_info)
 {
-	u32 extended_par_info = 0;
-	aspect_ratio_info->aspect_ratio = DDL_MEM_READ_32(shared_mem,
+	u32 extended_par_info = 0, aspect_ratio = 0;
+
+	aspect_ratio = DDL_MEM_READ_32(shared_mem,
 				VIDC_SM_ASPECT_RATIO_INFO_ADDR);
 
-	if (aspect_ratio_info->aspect_ratio == 0x0f) {
-		extended_par_info = DDL_MEM_READ_32(shared_mem,
-			VIDC_SM_EXTENDED_PAR_ADDR);
-		aspect_ratio_info->extended_par_width =
-			VIDC_GETFIELD(extended_par_info,
-			VIDC_SM_EXTENDED_PAR_WIDTH_BMSK,
-			VIDC_SM_EXTENDED_PAR_WIDTH_SHFT);
-		aspect_ratio_info->extended_par_height =
-			VIDC_GETFIELD(extended_par_info,
-			VIDC_SM_EXTENDED_PAR_HEIGHT_BMSK,
-			VIDC_SM_EXTENDED_PAR_HEIGHT_SHFT);
+	if (codec == VCD_CODEC_H264) {
+		aspect_ratio_info->aspect_ratio =
+			VIDC_GETFIELD(aspect_ratio,
+			VIDC_SM_H264_ASPECT_RATIO_INFO_BMSK,
+			VIDC_SM_H264_ASPECT_RATIO_INFO_SHFT);
+
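+		/*
+		 * The cases below follow the H.264 aspect_ratio_idc table
+		 * (Table E-1); 255 means Extended_SAR, with the PAR read
+		 * from the extended PAR shared-memory registers.
+		 */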
+		switch (aspect_ratio_info->aspect_ratio) {
+		case 1:
+			aspect_ratio_info->par_width    = 1;
+			aspect_ratio_info->par_height   = 1;
+			break;
+		case 2:
+			aspect_ratio_info->par_width    = 12;
+			aspect_ratio_info->par_height   = 11;
+			break;
+		case 3:
+			aspect_ratio_info->par_width    = 10;
+			aspect_ratio_info->par_height   = 11;
+			break;
+		case 4:
+			aspect_ratio_info->par_width    = 16;
+			aspect_ratio_info->par_height   = 11;
+			break;
+		case 5:
+			aspect_ratio_info->par_width    = 40;
+			aspect_ratio_info->par_height   = 33;
+			break;
+		case 6:
+			aspect_ratio_info->par_width    = 24;
+			aspect_ratio_info->par_height   = 11;
+			break;
+		case 7:
+			aspect_ratio_info->par_width    = 20;
+			aspect_ratio_info->par_height   = 11;
+			break;
+		case 8:
+			aspect_ratio_info->par_width    = 32;
+			aspect_ratio_info->par_height   = 11;
+			break;
+		case 9:
+			aspect_ratio_info->par_width    = 80;
+			aspect_ratio_info->par_height   = 33;
+			break;
+		case 10:
+			aspect_ratio_info->par_width    = 18;
+			aspect_ratio_info->par_height   = 11;
+			break;
+		case 11:
+			aspect_ratio_info->par_width    = 15;
+			aspect_ratio_info->par_height   = 11;
+			break;
+		case 12:
+			aspect_ratio_info->par_width    = 64;
+			aspect_ratio_info->par_height   = 33;
+			break;
+		case 13:
+			aspect_ratio_info->par_width    = 160;
+			aspect_ratio_info->par_height   = 99;
+			break;
+		case 14:
+			aspect_ratio_info->par_width    = 4;
+			aspect_ratio_info->par_height   = 3;
+			break;
+		case 15:
+			aspect_ratio_info->par_width    = 3;
+			aspect_ratio_info->par_height   = 2;
+			break;
+		case 16:
+			aspect_ratio_info->par_width    = 2;
+			aspect_ratio_info->par_height   = 1;
+			break;
+		case 255:
+			extended_par_info = DDL_MEM_READ_32(shared_mem,
+				VIDC_SM_EXTENDED_PAR_ADDR);
+			aspect_ratio_info->par_width =
+				VIDC_GETFIELD(extended_par_info,
+				VIDC_SM_EXTENDED_PAR_WIDTH_BMSK,
+				VIDC_SM_EXTENDED_PAR_WIDTH_SHFT);
+			aspect_ratio_info->par_height =
+				VIDC_GETFIELD(extended_par_info,
+				VIDC_SM_EXTENDED_PAR_HEIGHT_BMSK,
+				VIDC_SM_EXTENDED_PAR_HEIGHT_SHFT);
+			break;
+		default:
+			DDL_MSG_ERROR("Incorrect Aspect Ratio.");
+			aspect_ratio_info->par_width    = 1;
+			aspect_ratio_info->par_height   = 1;
+			break;
+		}
+	} else if ((codec == VCD_CODEC_MPEG4) ||
+		(codec == VCD_CODEC_DIVX_4) ||
+		(codec == VCD_CODEC_DIVX_5) ||
+		(codec == VCD_CODEC_DIVX_6) ||
+		(codec == VCD_CODEC_XVID) ||
+		(codec == VCD_CODEC_MPEG2)) {
+
+		if (codec == VCD_CODEC_MPEG2) {
+			aspect_ratio_info->aspect_ratio =
+				VIDC_GETFIELD(aspect_ratio,
+				VIDC_SM_MPEG2_ASPECT_RATIO_INFO_BMSK,
+				VIDC_SM_MPEG2_ASPECT_RATIO_INFO_SHFT);
+		} else {
+			aspect_ratio_info->aspect_ratio =
+				VIDC_GETFIELD(aspect_ratio,
+				VIDC_SM_MPEG4_ASPECT_RATIO_INFO_BMSK,
+				VIDC_SM_MPEG4_ASPECT_RATIO_INFO_SHFT);
+		}
+
+		switch (aspect_ratio_info->aspect_ratio) {
+		case 1:
+			aspect_ratio_info->par_width    = 1;
+			aspect_ratio_info->par_height   = 1;
+			break;
+		case 2:
+			aspect_ratio_info->par_width    = 12;
+			aspect_ratio_info->par_height   = 11;
+			break;
+		case 3:
+			aspect_ratio_info->par_width    = 10;
+			aspect_ratio_info->par_height   = 11;
+			break;
+		case 4:
+			aspect_ratio_info->par_width    = 16;
+			aspect_ratio_info->par_height   = 11;
+			break;
+		case 5:
+			aspect_ratio_info->par_width    = 40;
+			aspect_ratio_info->par_height   = 33;
+			break;
+		case 15:
+			extended_par_info = DDL_MEM_READ_32(shared_mem,
+				VIDC_SM_EXTENDED_PAR_ADDR);
+			aspect_ratio_info->par_width =
+				VIDC_GETFIELD(extended_par_info,
+				VIDC_SM_EXTENDED_PAR_WIDTH_BMSK,
+				VIDC_SM_EXTENDED_PAR_WIDTH_SHFT);
+			aspect_ratio_info->par_height =
+				VIDC_GETFIELD(extended_par_info,
+				VIDC_SM_EXTENDED_PAR_HEIGHT_BMSK,
+				VIDC_SM_EXTENDED_PAR_HEIGHT_SHFT);
+			break;
+		default:
+			DDL_MSG_ERROR("Incorrect Aspect Ratio.");
+			aspect_ratio_info->par_width    = 1;
+			aspect_ratio_info->par_height   = 1;
+			break;
+		}
 	}
 }
 
diff --git a/drivers/video/msm/vidc/1080p/ddl/vcd_ddl_shared_mem.h b/drivers/video/msm/vidc/1080p/ddl/vcd_ddl_shared_mem.h
index 6cd75595..1a46c36 100644
--- a/drivers/video/msm/vidc/1080p/ddl/vcd_ddl_shared_mem.h
+++ b/drivers/video/msm/vidc/1080p/ddl/vcd_ddl_shared_mem.h
@@ -180,7 +180,7 @@
 	struct ddl_buf_addr *shared_mem,
 	enum vidc_sm_num_stuff_bytes_consume_info consume_info);
 void vidc_sm_get_aspect_ratio_info(struct ddl_buf_addr *shared_mem,
-	struct vcd_aspect_ratio *aspect_ratio_info);
+	enum vcd_codec codec, struct vcd_aspect_ratio *aspect_ratio_info);
 void vidc_sm_set_encoder_slice_batch_int_ctrl(struct ddl_buf_addr *shared_mem,
 	u32 slice_batch_int_enable);
 void vidc_sm_get_num_slices_comp(struct ddl_buf_addr *shared_mem,
diff --git a/drivers/video/msm/vidc/720p/ddl/vcd_ddl.c b/drivers/video/msm/vidc/720p/ddl/vcd_ddl.c
index 02b2369..a144e06 100644
--- a/drivers/video/msm/vidc/720p/ddl/vcd_ddl.c
+++ b/drivers/video/msm/vidc/720p/ddl/vcd_ddl.c
@@ -42,8 +42,17 @@
 	}
 
 	DDL_MEMSET(ddl_context, 0, sizeof(struct ddl_context));
-
 	DDL_BUSY(ddl_context);
+
+	if (res_trk_get_enable_ion()) {
+		VIDC_LOGERR_STRING("ddl_dev_init: ION framework enabled");
+		ddl_context->video_ion_client  =
+			res_trk_get_ion_client();
+		if (!ddl_context->video_ion_client) {
+			VIDC_LOGERR_STRING("ION client create failed");
+			return VCD_ERR_ILLEGAL_OP;
+		}
+	}
 	ddl_context->memtype = res_trk_get_mem_type();
 	if (ddl_context->memtype == -1) {
 		VIDC_LOGERR_STRING("ddl_dev_init:Invalid Memtype");
@@ -161,7 +170,7 @@
 
 	VIDC_LOG_STRING("FW_ENDDONE");
 	ddl_release_context_buffers(ddl_context);
-
+	ddl_context->video_ion_client = NULL;
 	DDL_IDLE(ddl_context);
 
 	return VCD_S_SUCCESS;
diff --git a/drivers/video/msm/vidc/720p/ddl/vcd_ddl.h b/drivers/video/msm/vidc/720p/ddl/vcd_ddl.h
index e1407c8..e6d3527 100644
--- a/drivers/video/msm/vidc/720p/ddl/vcd_ddl.h
+++ b/drivers/video/msm/vidc/720p/ddl/vcd_ddl.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2010, Code Aurora Forum. All rights reserved.
+/* Copyright (c) 2010-2012 Code Aurora Forum. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -77,6 +77,7 @@
 	u32 *align_physical_addr;
 	u32 *align_virtual_addr;
 	struct msm_mapped_buffer *mapped_buffer;
+	struct ion_handle *alloc_handle;
 	u32 buffer_size;
 	enum ddl_mem_area mem_type;
 };
@@ -225,6 +226,7 @@
 	struct ddl_buf_addr dbg_core_dump;
 	u32 enable_dbg_core_dump;
 	struct ddl_client_context *ddl_clients[VCD_MAX_NO_CLIENT];
+	struct ion_client *video_ion_client;
 	u32 device_state;
 	u32 ddl_busy;
 	u32  intr_status;
diff --git a/drivers/video/msm/vidc/720p/ddl/vcd_ddl_utils.c b/drivers/video/msm/vidc/720p/ddl/vcd_ddl_utils.c
index aa0d4b8..21f01d1 100644
--- a/drivers/video/msm/vidc/720p/ddl/vcd_ddl_utils.c
+++ b/drivers/video/msm/vidc/720p/ddl/vcd_ddl_utils.c
@@ -13,6 +13,7 @@
 #include <linux/memory_alloc.h>
 #include <media/msm/vidc_type.h>
 #include "vcd_ddl_utils.h"
+#include "vcd_res_tracker_api.h"
 
 #if DEBUG
 #define DBG(x...) printk(KERN_DEBUG x)
@@ -91,103 +92,178 @@
 	u32 alloc_size, flags = 0;
 	struct ddl_context *ddl_context;
 	struct msm_mapped_buffer *mapped_buffer = NULL;
+	unsigned long *kernel_vaddr = NULL;
+	ion_phys_addr_t phyaddr = 0;
+	size_t len = 0;
+	int ret = -EINVAL;
 
 	if (!buff_addr) {
-		ERR("\n%s() Invalid Parameters", __func__);
+		ERR("\n%s() Invalid Parameters\n", __func__);
 		return;
 	}
-
-	DBG_PMEM("\n%s() IN: Requested alloc size(%u)", __func__, (u32)sz);
-
 	if (align == DDL_LINEAR_BUFFER_ALIGN_BYTES) {
-
 		guard_bytes = 31;
 		align_mask = 0xFFFFFFE0U;
-
 	} else {
-
 		guard_bytes = DDL_TILE_BUF_ALIGN_GUARD_BYTES;
 		align_mask = DDL_TILE_BUF_ALIGN_MASK;
 	}
 	ddl_context = ddl_get_context();
 	alloc_size = sz + guard_bytes;
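+	/*
+	 * Two allocation paths: when ION is enabled, allocate through the
+	 * video ION client and map it into the kernel; otherwise fall back
+	 * to contiguous pmem plus msm_subsystem_map_buffer().
+	 */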
+	if (res_trk_get_enable_ion()) {
+		if (!ddl_context->video_ion_client)
+			ddl_context->video_ion_client =
+				res_trk_get_ion_client();
+		if (!ddl_context->video_ion_client) {
+			ERR("\n%s(): DDL ION Client Invalid handle\n",
+				__func__);
+			goto bailout;
+		}
+		buff_addr->mem_type = res_trk_get_mem_type();
+		buff_addr->alloc_handle = ion_alloc(
+					ddl_context->video_ion_client,
+					alloc_size,
+					SZ_4K,
+					buff_addr->mem_type);
+		if (!buff_addr->alloc_handle) {
+			ERR("\n%s(): DDL ION alloc failed\n",
+					__func__);
+			goto bailout;
+		}
+		ret = ion_phys(ddl_context->video_ion_client,
+					buff_addr->alloc_handle,
+					&phyaddr,
+					&len);
+		if (ret || !phyaddr) {
+			ERR("\n%s(): DDL ION client physical failed\n",
+					__func__);
+			goto free_ion_buffer;
+		}
+		buff_addr->physical_base_addr = (u32 *)phyaddr;
+		kernel_vaddr = (unsigned long *) ion_map_kernel(
+					ddl_context->video_ion_client,
+					buff_addr->alloc_handle,
+					UNCACHED);
+		if (IS_ERR_OR_NULL(kernel_vaddr)) {
+			ERR("\n%s(): DDL ION map failed\n", __func__);
+			goto unmap_ion_buffer;
+		}
+		buff_addr->virtual_base_addr = (u32 *)kernel_vaddr;
+		DBG("ddl_ion_alloc: handle(0x%x), mem_type(0x%x), "\
+			"phys(0x%x), virt(0x%x), size(%u), align(%u), "\
+			"alloced_len(%u)", (u32)buff_addr->alloc_handle,
+			(u32)buff_addr->mem_type,
+			(u32)buff_addr->physical_base_addr,
+			(u32)buff_addr->virtual_base_addr,
+			alloc_size, align, len);
+	} else {
+		physical_addr = (u32)
+			allocate_contiguous_memory_nomap(alloc_size,
+						ddl_context->memtype, SZ_4K);
+		if (!physical_addr) {
+			ERR("\n%s(): DDL pmem allocate failed\n",
+			       __func__);
+			goto bailout;
+		}
+		buff_addr->physical_base_addr = (u32 *) physical_addr;
+		flags = MSM_SUBSYSTEM_MAP_KADDR;
+		buff_addr->mapped_buffer =
+		msm_subsystem_map_buffer((unsigned long)physical_addr,
+		alloc_size, flags, NULL, 0);
+		if (IS_ERR(buff_addr->mapped_buffer)) {
+			ERR("\n%s() buffer map failed\n", __func__);
+			goto free_pmem_buffer;
+		}
+		mapped_buffer = buff_addr->mapped_buffer;
+		if (!mapped_buffer->vaddr) {
+			ERR("\n%s() mapped virtual address is NULL\n",
+				__func__);
+			goto unmap_pmem_buffer;
+		}
+		buff_addr->virtual_base_addr = mapped_buffer->vaddr;
+		DBG("ddl_pmem_alloc: mem_type(0x%x), phys(0x%x),"\
+			" virt(0x%x), sz(%u), align(%u)",
+			(u32)buff_addr->mem_type,
+			(u32)buff_addr->physical_base_addr,
+			(u32)buff_addr->virtual_base_addr,
+			alloc_size, SZ_4K);
+	}
 
-	physical_addr = (u32)
-		allocate_contiguous_memory_nomap(alloc_size,
-					ddl_context->memtype, SZ_4K);
-
-	if (!physical_addr) {
-		pr_err("%s(): could not allocate kernel pmem buffers\n",
-		       __func__);
-		goto bailout;
-	}
-	buff_addr->physical_base_addr = (u32 *) physical_addr;
-	flags = MSM_SUBSYSTEM_MAP_KADDR;
-	buff_addr->mapped_buffer =
-	msm_subsystem_map_buffer((unsigned long)physical_addr,
-	alloc_size, flags, NULL, 0);
-	if (IS_ERR(buff_addr->mapped_buffer)) {
-		pr_err(" %s() buffer map failed", __func__);
-		goto free_acm_alloc;
-	}
-	mapped_buffer = buff_addr->mapped_buffer;
-	if (!mapped_buffer->vaddr) {
-		pr_err("%s() mapped virtual address is NULL", __func__);
-		goto free_map_buffers;
-	}
-	buff_addr->virtual_base_addr = mapped_buffer->vaddr;
 	memset(buff_addr->virtual_base_addr, 0 , sz + guard_bytes);
 	buff_addr->buffer_size = sz;
-
-	buff_addr->align_physical_addr =
-	    (u32 *) ((physical_addr + guard_bytes) & align_mask);
-
-	align_offset =
-	    (u32) (buff_addr->align_physical_addr) - physical_addr;
-
+	buff_addr->align_physical_addr = (u32 *)
+		(((u32)buff_addr->physical_base_addr + guard_bytes) &
+		align_mask);
+	align_offset = (u32) (buff_addr->align_physical_addr) -
+		(u32)buff_addr->physical_base_addr;
 	buff_addr->align_virtual_addr =
 	    (u32 *) ((u32) (buff_addr->virtual_base_addr)
 		     + align_offset);
-
-	DBG_PMEM("\n%s() OUT: phy_addr(%p) ker_addr(%p) size(%u)", __func__,
-		buff_addr->physical_base_addr, buff_addr->virtual_base_addr,
-		buff_addr->buffer_size);
-
+	DBG("%s(): phys(0x%x) align_phys(0x%x), virt(0x%x),"\
+		" align_virt(0x%x)", __func__,
+		(u32)buff_addr->physical_base_addr,
+		(u32)buff_addr->align_physical_addr,
+		(u32)buff_addr->virtual_base_addr,
+		(u32)buff_addr->align_virtual_addr);
 	return;
-free_map_buffers:
-	msm_subsystem_unmap_buffer(buff_addr->mapped_buffer);
-free_acm_alloc:
-	free_contiguous_memory_by_paddr(
-		(unsigned long) physical_addr);
+
+unmap_pmem_buffer:
+	if (buff_addr->mapped_buffer)
+		msm_subsystem_unmap_buffer(buff_addr->mapped_buffer);
+free_pmem_buffer:
+	if (buff_addr->physical_base_addr)
+		free_contiguous_memory_by_paddr((unsigned long)
+			buff_addr->physical_base_addr);
+	memset(buff_addr, 0, sizeof(struct ddl_buf_addr));
+	return;
+
+unmap_ion_buffer:
+	if (ddl_context->video_ion_client) {
+		if (buff_addr->alloc_handle)
+			ion_unmap_kernel(ddl_context->video_ion_client,
+				buff_addr->alloc_handle);
+	}
+free_ion_buffer:
+	if (ddl_context->video_ion_client) {
+		if (buff_addr->alloc_handle)
+			ion_free(ddl_context->video_ion_client,
+				buff_addr->alloc_handle);
+	}
 bailout:
-	buff_addr->physical_base_addr = NULL;
-	buff_addr->virtual_base_addr = NULL;
-	buff_addr->buffer_size = 0;
-	buff_addr->mapped_buffer = NULL;
+	memset(buff_addr, 0, sizeof(struct ddl_buf_addr));
 }
 
 void ddl_pmem_free(struct ddl_buf_addr *buff_addr)
 {
+	struct ddl_context *ddl_context;
+	ddl_context = ddl_get_context();
 	if (!buff_addr) {
 		ERR("\n %s() invalid arguments %p", __func__, buff_addr);
 		return;
 	}
-	DBG_PMEM("\n%s() IN: phy_addr(%p) ker_addr(%p) size(%u)", __func__,
-		buff_addr->physical_base_addr, buff_addr->virtual_base_addr,
+	DBG("ddl_pmem_free: phys(0x%x) align_phys(0x%x), "\
+		"virt(0x%x), align_virt(0x%x), size(%u)",
+		(u32)buff_addr->physical_base_addr,
+		(u32)buff_addr->align_physical_addr,
+		(u32)buff_addr->virtual_base_addr,
+		(u32)buff_addr->align_virtual_addr,
 		buff_addr->buffer_size);
-
-	if (buff_addr->mapped_buffer)
-		msm_subsystem_unmap_buffer(buff_addr->mapped_buffer);
-	if (buff_addr->physical_base_addr)
-		free_contiguous_memory_by_paddr(
-			(unsigned long) buff_addr->physical_base_addr);
-	DBG_PMEM("\n%s() OUT: phy_addr(%p) ker_addr(%p) size(%u)", __func__,
-		buff_addr->physical_base_addr, buff_addr->virtual_base_addr,
-		buff_addr->buffer_size);
-	buff_addr->buffer_size = 0;
-	buff_addr->physical_base_addr = NULL;
-	buff_addr->virtual_base_addr = NULL;
-	buff_addr->mapped_buffer = NULL;
+	if (ddl_context->video_ion_client) {
+		if (buff_addr->alloc_handle) {
+			ion_unmap_kernel(ddl_context->video_ion_client,
+				buff_addr->alloc_handle);
+			ion_free(ddl_context->video_ion_client,
+				buff_addr->alloc_handle);
+		}
+	} else {
+		if (buff_addr->mapped_buffer)
+			msm_subsystem_unmap_buffer(
+				buff_addr->mapped_buffer);
+		if (buff_addr->physical_base_addr)
+			free_contiguous_memory_by_paddr((unsigned long)
+				buff_addr->physical_base_addr);
+	}
+	memset(buff_addr, 0, sizeof(struct ddl_buf_addr));
 }
 #endif
 
diff --git a/drivers/video/msm/vidc/720p/resource_tracker/vcd_res_tracker.c b/drivers/video/msm/vidc/720p/resource_tracker/vcd_res_tracker.c
index e51bf45..aee9dfe 100644
--- a/drivers/video/msm/vidc/720p/resource_tracker/vcd_res_tracker.c
+++ b/drivers/video/msm/vidc/720p/resource_tracker/vcd_res_tracker.c
@@ -677,8 +677,16 @@
 	return false;
 }
 
+static struct ion_client *res_trk_create_ion_client(void)
+{
+	struct ion_client *video_client;
+	VCDRES_MSG_LOW("%s", __func__);
+	video_client = msm_ion_client_create(-1, "video_client");
+	return video_client;
+}
+
 void res_trk_init(struct device *device, u32 irq)
 {
+	VCDRES_MSG_LOW("%s", __func__);
 	if (resource_context.device || resource_context.irq_num ||
 		!device) {
 		VCDRES_MSG_ERROR("%s() Resource Tracker Init error\n",
@@ -695,9 +703,27 @@
 		(struct msm_vidc_platform_data *) device->platform_data;
 	if (resource_context.vidc_platform_data) {
 		resource_context.memtype =
-		resource_context.vidc_platform_data->memtype;
+			resource_context.vidc_platform_data->memtype;
+		VCDRES_MSG_LOW("%s(): resource_context.memtype = 0x%x",
+			__func__, (u32)resource_context.memtype);
+		if (resource_context.vidc_platform_data->enable_ion) {
+			resource_context.res_ion_client =
+				res_trk_create_ion_client();
+			if (!(resource_context.res_ion_client)) {
+				VCDRES_MSG_ERROR("%s(): ION client create failed\n",
+						__func__);
+				return;
+			}
+			VCDRES_MSG_LOW("%s(): ion_client = 0x%x", __func__,
+				(u32)resource_context.res_ion_client);
+		} else {
+			VCDRES_MSG_ERROR("%s(): ION not enabled\n",
+					__func__);
+		}
 	} else {
 		resource_context.memtype = -1;
+		VCDRES_MSG_ERROR("%s(): vidc_platform_data is NULL",
+			__func__);
 	}
 }
 
@@ -705,18 +731,23 @@
 	return resource_context.core_type;
 }
 
-u32 res_trk_get_mem_type(void){
-	return resource_context.memtype;
-}
-
 u32 res_trk_get_enable_ion(void)
 {
-	return 0;
+	if (resource_context.vidc_platform_data->enable_ion)
+		return 1;
+	else
+		return 0;
 }
 
 struct ion_client *res_trk_get_ion_client(void)
 {
-	return NULL;
+	return resource_context.res_ion_client;
+}
+
+u32 res_trk_get_mem_type(void)
+{
+	u32 mem_type = ION_HEAP(resource_context.memtype);
+	return mem_type;
 }
 
 void res_trk_set_mem_type(enum ddl_mem_area mem_type)
diff --git a/drivers/video/msm/vidc/720p/resource_tracker/vcd_res_tracker.h b/drivers/video/msm/vidc/720p/resource_tracker/vcd_res_tracker.h
index 2b92a42..f8d9053 100644
--- a/drivers/video/msm/vidc/720p/resource_tracker/vcd_res_tracker.h
+++ b/drivers/video/msm/vidc/720p/resource_tracker/vcd_res_tracker.h
@@ -13,6 +13,7 @@
 #ifndef _VIDEO_720P_RESOURCE_TRACKER_H_
 #define _VIDEO_720P_RESOURCE_TRACKER_H_
 #include <mach/board.h>
+#include <linux/ion.h>
 #include "vcd_res_tracker_api.h"
 
 #define VCD_RESTRK_MIN_PERF_LEVEL 37900
@@ -36,6 +37,8 @@
 	u32 core_type;
 	int memtype;
 	u32 secure_session;
+	struct ion_client *res_ion_client;
+	enum ddl_mem_area res_mem_type;
 };
 
 #if DEBUG
diff --git a/drivers/video/msm/vidc/common/dec/vdec.c b/drivers/video/msm/vidc/common/dec/vdec.c
index 634011b..927f19b 100644
--- a/drivers/video/msm/vidc/common/dec/vdec.c
+++ b/drivers/video/msm/vidc/common/dec/vdec.c
@@ -334,9 +334,9 @@
 		output_frame->aspect_ratio_info.aspect_ratio =
 			vcd_frame_data->aspect_ratio_info.aspect_ratio;
 		output_frame->aspect_ratio_info.par_width =
-			vcd_frame_data->aspect_ratio_info.extended_par_width;
+			vcd_frame_data->aspect_ratio_info.par_width;
 		output_frame->aspect_ratio_info.par_height =
-			vcd_frame_data->aspect_ratio_info.extended_par_height;
+			vcd_frame_data->aspect_ratio_info.par_height;
 		vdec_msg->vdec_msg_info.msgdatasize =
 		    sizeof(struct vdec_output_frameinfo);
 	} else {
@@ -915,7 +915,8 @@
 				 __func__);
 			goto import_ion_error;
 		}
-		if (res_trk_check_for_sec_session()) {
+		if (res_trk_check_for_sec_session() ||
+		   (res_trk_get_core_type() == (u32)VCD_CORE_720P)) {
 			rc = ion_phys(client_ctx->user_ion_client,
 				client_ctx->h264_mv_ion_handle,
 				(unsigned long *) (&(vcd_h264_mv_buffer->
@@ -1038,7 +1039,8 @@
 	if (!IS_ERR_OR_NULL(client_ctx->h264_mv_ion_handle)) {
 		ion_unmap_kernel(client_ctx->user_ion_client,
 					client_ctx->h264_mv_ion_handle);
-		if (!res_trk_check_for_sec_session()) {
+		if (!res_trk_check_for_sec_session() &&
+		   (res_trk_get_core_type() != (u32)VCD_CORE_720P)) {
 			ion_unmap_iommu(client_ctx->user_ion_client,
 				client_ctx->h264_mv_ion_handle,
 				VIDEO_DOMAIN,
diff --git a/drivers/video/msm/vidc/common/enc/venc_internal.c b/drivers/video/msm/vidc/common/enc/venc_internal.c
index 9450ee7..50cccbb 100644
--- a/drivers/video/msm/vidc/common/enc/venc_internal.c
+++ b/drivers/video/msm/vidc/common/enc/venc_internal.c
@@ -1844,7 +1844,8 @@
 				 __func__);
 			goto import_ion_error;
 		}
-		if (res_trk_check_for_sec_session()) {
+		if (res_trk_check_for_sec_session() ||
+		   (res_trk_get_core_type() == (u32)VCD_CORE_720P)) {
 			rc = ion_phys(client_ctx->user_ion_client,
 				client_ctx->recon_buffer_ion_handle[i],
 				&phy_addr, &ion_len);
@@ -1945,7 +1946,8 @@
 		if (client_ctx->recon_buffer_ion_handle[i]) {
 			ion_unmap_kernel(client_ctx->user_ion_client,
 				client_ctx->recon_buffer_ion_handle[i]);
-			if (!res_trk_check_for_sec_session()) {
+			if (!res_trk_check_for_sec_session() &&
+			   (res_trk_get_core_type() != (u32)VCD_CORE_720P)) {
 				ion_unmap_iommu(client_ctx->user_ion_client,
 				client_ctx->recon_buffer_ion_handle[i],
 				VIDEO_DOMAIN,
diff --git a/drivers/video/msm/vidc/common/init/vidc_init.c b/drivers/video/msm/vidc/common/init/vidc_init.c
index dcacb3c..c884cf5 100644
--- a/drivers/video/msm/vidc/common/init/vidc_init.c
+++ b/drivers/video/msm/vidc/common/init/vidc_init.c
@@ -432,7 +432,9 @@
 				ion_unmap_kernel(client_ctx->user_ion_client,
 						buf_addr_table[i].
 						buff_ion_handle);
-				if (!res_trk_check_for_sec_session()) {
+				if (!res_trk_check_for_sec_session() &&
+				   (res_trk_get_core_type() !=
+				   (u32)VCD_CORE_720P)) {
 					ion_unmap_iommu(
 						client_ctx->user_ion_client,
 						buf_addr_table[i].
@@ -456,7 +458,8 @@
 		if (!IS_ERR_OR_NULL(client_ctx->user_ion_client)) {
 			ion_unmap_kernel(client_ctx->user_ion_client,
 					client_ctx->h264_mv_ion_handle);
-			if (!res_trk_check_for_sec_session()) {
+			if (!res_trk_check_for_sec_session() &&
+			    (res_trk_get_core_type() != (u32)VCD_CORE_720P)) {
 				ion_unmap_iommu(client_ctx->user_ion_client,
 					client_ctx->h264_mv_ion_handle,
 					VIDEO_DOMAIN,
@@ -652,7 +655,8 @@
 				*kernel_vaddr = (unsigned long)NULL;
 				goto ion_free_error;
 			}
-			if (res_trk_check_for_sec_session()) {
+			if (res_trk_check_for_sec_session() ||
+			   (res_trk_get_core_type() == (u32)VCD_CORE_720P)) {
 				if (ion_phys(client_ctx->user_ion_client,
 					buff_ion_handle,
 					&phys_addr, &ion_len)) {
@@ -780,7 +784,7 @@
 		*num_of_buffers = *num_of_buffers + 1;
 		DBG("%s() : client_ctx = %p, user_virt_addr = 0x%08lx, "
 			"kernel_vaddr = 0x%08lx inserted!", __func__,
-			client_ctx, user_vaddr, *kernel_vaddr);
+			client_ctx, user_vaddr, kernel_vaddr);
 	}
 	mutex_unlock(&client_ctx->enrty_queue_lock);
 	return true;
@@ -833,7 +837,8 @@
 	if (buf_addr_table[i].buff_ion_handle) {
 		ion_unmap_kernel(client_ctx->user_ion_client,
 				buf_addr_table[i].buff_ion_handle);
-		if (!res_trk_check_for_sec_session()) {
+		if (!res_trk_check_for_sec_session() &&
+		   (res_trk_get_core_type() != (u32)VCD_CORE_720P)) {
 			ion_unmap_iommu(client_ctx->user_ion_client,
 				buf_addr_table[i].buff_ion_handle,
 				VIDEO_DOMAIN,
diff --git a/drivers/video/msm/vidc/common/vcd/vcd_api.c b/drivers/video/msm/vidc/common/vcd/vcd_api.c
index c66c2b7..0dbbf57 100644
--- a/drivers/video/msm/vidc/common/vcd/vcd_api.c
+++ b/drivers/video/msm/vidc/common/vcd/vcd_api.c
@@ -13,6 +13,7 @@
 
 #include <linux/export.h>
 #include <media/msm/vidc_type.h>
+#include <media/msm/vidc_init.h>
 #include "vcd.h"
 
 u32 vcd_init(struct vcd_init_config *config, s32 *driver_handle)
@@ -157,7 +158,7 @@
 		       void *handle, void *const client_data),
 	void *client_data, int flags)
 {
-	u32 rc = 0;
+	u32 rc = 0, num_of_instances = 0;
 	struct vcd_drv_ctxt *drv_ctxt;
 	struct vcd_clnt_ctxt *cctxt;
 	int is_secure = (flags & VCD_CP_SESSION) ? 1 : 0;
@@ -167,6 +168,17 @@
 		VCD_MSG_ERROR("Bad parameters");
 		return -EINVAL;
 	}
+
+	drv_ctxt = vcd_get_drv_context();
+	cctxt = drv_ctxt->dev_ctxt.cctxt_list_head;
+	while (cctxt) {
+		num_of_instances++;
+		cctxt = cctxt->next;
+	}
+	if (num_of_instances == VIDC_MAX_NUM_CLIENTS) {
+		pr_err(" %s(): Max number of clients reached\n", __func__);
+		return -ENODEV;
+	}
 	rc = is_session_invalid(decoding, flags);
 	if (rc) {
 		VCD_MSG_ERROR("Invalid Session: is_decoder: %d, secure: %d\n",
@@ -175,7 +187,6 @@
 	}
 	if (is_secure)
 		res_trk_secure_set();
-	drv_ctxt = vcd_get_drv_context();
 	mutex_lock(&drv_ctxt->dev_mutex);
 
 	if (drv_ctxt->dev_state.state_table->ev_hdlr.open) {
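vcd_open() now refuses a new session once VIDC_MAX_NUM_CLIENTS contexts sit on the device's client list. A sketch of that walk pulled into a helper; the name vcd_count_clients() is hypothetical, the field names follow the patch:

static u32 vcd_count_clients(struct vcd_dev_ctxt *dev_ctxt)
{
	struct vcd_clnt_ctxt *cctxt = dev_ctxt->cctxt_list_head;
	u32 n = 0;

	/* singly linked list headed at cctxt_list_head, NULL terminated */
	while (cctxt) {
		n++;
		cctxt = cctxt->next;
	}
	return n;
}

As in the patch, the count is taken before dev_mutex is acquired, so it is a best-effort admission check rather than a hard guarantee.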
diff --git a/drivers/video/msm/vidc/common/vcd/vcd_core.h b/drivers/video/msm/vidc/common/vcd/vcd_core.h
index 79bcac0..8126a0e 100644
--- a/drivers/video/msm/vidc/common/vcd/vcd_core.h
+++ b/drivers/video/msm/vidc/common/vcd/vcd_core.h
@@ -25,7 +25,7 @@
 
 #define VCD_MIN_PERF_LEVEL                   37900
 
-#define VCD_DRIVER_INSTANCE_MAX              4
+#define VCD_DRIVER_CLIENTS_MAX              6
 
 #define VCD_MAX_CLIENT_TRANSACTIONS          32
 
@@ -126,7 +126,7 @@
 
 	struct vcd_init_config config;
 
-	u32 driver_ids[VCD_DRIVER_INSTANCE_MAX];
+	u32 driver_ids[VCD_DRIVER_CLIENTS_MAX];
 	u32 refs;
 	u8 *device_base_addr;
 	void *hw_timer_handle;
diff --git a/drivers/video/msm/vidc/common/vcd/vcd_device_sm.c b/drivers/video/msm/vidc/common/vcd/vcd_device_sm.c
index 96e729d..53495e0 100644
--- a/drivers/video/msm/vidc/common/vcd/vcd_device_sm.c
+++ b/drivers/video/msm/vidc/common/vcd/vcd_device_sm.c
@@ -537,12 +537,12 @@
 	*driver_handle = 0;
 
 	driver_id = 0;
-	while (driver_id < VCD_DRIVER_INSTANCE_MAX &&
+	while (driver_id < VCD_DRIVER_CLIENTS_MAX &&
 		   dev_ctxt->driver_ids[driver_id]) {
 		++driver_id;
 	}
 
-	if (driver_id == VCD_DRIVER_INSTANCE_MAX) {
+	if (driver_id == VCD_DRIVER_CLIENTS_MAX) {
 		VCD_MSG_ERROR("Max driver instances reached");
 
 		return VCD_ERR_FAIL;
diff --git a/drivers/video/msm/vidc/common/vcd/vcd_sub.c b/drivers/video/msm/vidc/common/vcd/vcd_sub.c
index 5b64f20..28ea453 100644
--- a/drivers/video/msm/vidc/common/vcd/vcd_sub.c
+++ b/drivers/video/msm/vidc/common/vcd/vcd_sub.c
@@ -41,6 +41,8 @@
 	unsigned long buffer_size = 0;
 	int ret = 0;
 	unsigned long ionflag = 0;
+	ion_phys_addr_t phyaddr = 0;
+	size_t len = 0;
 
 	if (!kernel_vaddr || !phy_addr || !cctxt) {
 		pr_err("\n%s: Invalid parameters", __func__);
@@ -84,6 +86,9 @@
 		}
 		*phy_addr = (u8 *) mapped_buffer->iova[0];
 		*kernel_vaddr = (u8 *) mapped_buffer->vaddr;
+		VCD_MSG_LOW("vcd_pmem_alloc: phys(0x%x), virt(0x%x), "\
+			"sz(%u), flags(0x%x)", (u32)*phy_addr,
+			(u32)*kernel_vaddr, sz, (u32)flags);
 	} else {
 		map_buffer->alloc_handle = ion_alloc(
 			    cctxt->vcd_ion_client, sz, SZ_4K,
@@ -106,7 +111,8 @@
 			pr_err("%s() ION map failed", __func__);
 			goto ion_free_bailout;
 		}
-		ret = ion_map_iommu(cctxt->vcd_ion_client,
+		if (res_trk_get_core_type() != (u32)VCD_CORE_720P) {
+			ret = ion_map_iommu(cctxt->vcd_ion_client,
 				map_buffer->alloc_handle,
 				VIDEO_DOMAIN,
 				VIDEO_MAIN_POOL,
@@ -115,18 +121,32 @@
 				(unsigned long *)&iova,
 				(unsigned long *)&buffer_size,
 				UNCACHED, 0);
-		if (ret) {
-			pr_err("%s() ION iommu map failed", __func__);
-			goto ion_map_bailout;
+			if (ret) {
+				pr_err("%s() ION iommu map failed", __func__);
+				goto ion_map_bailout;
+			}
+			map_buffer->phy_addr = iova;
+		} else {
+			ret = ion_phys(cctxt->vcd_ion_client,
+				map_buffer->alloc_handle,
+				&phyaddr,
+				&len);
+			if (ret) {
+				pr_err("%s() ion_phys failed", __func__);
+				goto ion_map_bailout;
+			}
+			map_buffer->phy_addr = phyaddr;
 		}
-		map_buffer->phy_addr = iova;
 		if (!map_buffer->phy_addr) {
 			pr_err("%s() acm alloc failed", __func__);
 			goto free_map_table;
 		}
-		*phy_addr = (u8 *)iova;
+		*phy_addr = (u8 *)map_buffer->phy_addr;
 		mapped_buffer = NULL;
 		map_buffer->mapped_buffer = NULL;
+		VCD_MSG_LOW("vcd_ion_alloc: phys(0x%x), virt(0x%x), "\
+			"sz(%u), ionflags(0x%x)", (u32)*phy_addr,
+			(u32)*kernel_vaddr, sz, (u32)ionflag);
 	}
 
 	return 0;
@@ -176,10 +196,13 @@
 	if (map_buffer->mapped_buffer)
 		msm_subsystem_unmap_buffer(map_buffer->mapped_buffer);
 	if (cctxt->vcd_enable_ion) {
+		VCD_MSG_LOW("vcd_ion_free: phys(0x%x), virt(0x%x)",
+			(u32)phy_addr, (u32)kernel_vaddr);
 		if (map_buffer->alloc_handle) {
 			ion_unmap_kernel(cctxt->vcd_ion_client,
 					map_buffer->alloc_handle);
-			ion_unmap_iommu(cctxt->vcd_ion_client,
+			if (res_trk_get_core_type() != (u32)VCD_CORE_720P)
+				ion_unmap_iommu(cctxt->vcd_ion_client,
 					map_buffer->alloc_handle,
 					VIDEO_DOMAIN,
 					VIDEO_MAIN_POOL);
@@ -187,6 +210,8 @@
 			map_buffer->alloc_handle);
 		}
 	} else {
+		VCD_MSG_LOW("vcd_pmem_free: phys(0x%x), virt(0x%x)",
+			(u32)phy_addr, (u32)kernel_vaddr);
 		free_contiguous_memory_by_paddr(
 			(unsigned long)map_buffer->phy_addr);
 	}
@@ -1228,7 +1253,7 @@
 	driver_handle--;
 
 	if (driver_handle < 0 ||
-		driver_handle >= VCD_DRIVER_INSTANCE_MAX ||
+		driver_handle >= VCD_DRIVER_CLIENTS_MAX ||
 		!dev_ctxt->driver_ids[driver_handle]) {
 		return false;
 	} else {
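For completeness, the allocation path in vcd_sub.c now mirrors the same split: on IOMMU-capable cores the device address comes from ion_map_iommu(), on the 720p core it comes from ion_phys(). A condensed sketch of that branch as a standalone helper (hypothetical name, error handling trimmed):

static int vcd_get_device_addr(struct ion_client *client,
			       struct ion_handle *handle,
			       unsigned long *dev_addr)
{
	unsigned long iova = 0, buffer_size = 0;
	ion_phys_addr_t phyaddr = 0;
	size_t len = 0;
	int rc;

	if (res_trk_get_core_type() != (u32)VCD_CORE_720P) {
		rc = ion_map_iommu(client, handle, VIDEO_DOMAIN,
				   VIDEO_MAIN_POOL, SZ_4K, 0,
				   &iova, &buffer_size, UNCACHED, 0);
		*dev_addr = iova;
	} else {
		rc = ion_phys(client, handle, &phyaddr, &len);
		*dev_addr = phyaddr;
	}
	return rc;
}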
diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c
index 5aa43c3..7e6fd75 100644
--- a/drivers/virtio/virtio_ring.c
+++ b/drivers/virtio/virtio_ring.c
@@ -172,7 +172,7 @@
 }
 
 /**
- * virtqueue_add_buf - expose buffer to other end
+ * vring_add_buf - expose buffer to other end
  * @vq: the struct virtqueue we're talking about.
  * @sg: the description of the buffer(s).
  * @out_num: the number of sg readable by other side
@@ -188,7 +188,7 @@
  * positive return values as "available": indirect buffers mean that
  * we can put an entire sg[] array inside a single queue entry.
  */
-int virtqueue_add_buf(struct virtqueue *_vq,
+static int vring_add_buf(struct virtqueue *_vq,
 		      struct scatterlist sg[],
 		      unsigned int out,
 		      unsigned int in,
@@ -288,20 +288,19 @@
 
 	return vq->num_free;
 }
-EXPORT_SYMBOL_GPL(virtqueue_add_buf);
 
 /**
- * virtqueue_kick_prepare - first half of split virtqueue_kick call.
+ * vring_kick_prepare - first half of split vring_kick call.
  * @vq: the struct virtqueue
  *
- * Instead of virtqueue_kick(), you can do:
- *	if (virtqueue_kick_prepare(vq))
- *		virtqueue_notify(vq);
+ * Instead of vring_kick(), you can do:
+ *	if (vring_kick_prepare(vq))
+ *		vring_kick_notify(vq);
  *
- * This is sometimes useful because the virtqueue_kick_prepare() needs
- * to be serialized, but the actual virtqueue_notify() call does not.
+ * This is sometimes useful because the vring_kick_prepare() needs
+ * to be serialized, but the actual vring_kick_notify() call does not.
  */
-bool virtqueue_kick_prepare(struct virtqueue *_vq)
+static bool vring_kick_prepare(struct virtqueue *_vq)
 {
 	struct vring_virtqueue *vq = to_vvq(_vq);
 	u16 new, old;
@@ -333,39 +332,36 @@
 	END_USE(vq);
 	return needs_kick;
 }
-EXPORT_SYMBOL_GPL(virtqueue_kick_prepare);
 
 /**
- * virtqueue_notify - second half of split virtqueue_kick call.
+ * vring_kick_notify - second half of split virtqueue_kick call.
  * @vq: the struct virtqueue
  *
  * This does not need to be serialized.
  */
-void virtqueue_notify(struct virtqueue *_vq)
+static void vring_kick_notify(struct virtqueue *_vq)
 {
 	struct vring_virtqueue *vq = to_vvq(_vq);
 
 	/* Prod other side to tell it about changes. */
 	vq->notify(_vq);
 }
-EXPORT_SYMBOL_GPL(virtqueue_notify);
 
 /**
- * virtqueue_kick - update after add_buf
+ * vring_kick - update after add_buf
  * @vq: the struct virtqueue
  *
- * After one or more virtqueue_add_buf calls, invoke this to kick
+ * After one or more vring_add_buf calls, invoke this to kick
  * the other side.
  *
  * Caller must ensure we don't call this with other virtqueue
  * operations at the same time (except where noted).
  */
-void virtqueue_kick(struct virtqueue *vq)
+static void vring_kick(struct virtqueue *vq)
 {
-	if (virtqueue_kick_prepare(vq))
-		virtqueue_notify(vq);
+	if (vring_kick_prepare(vq))
+		vring_kick_notify(vq);
 }
-EXPORT_SYMBOL_GPL(virtqueue_kick);
 
 static void detach_buf(struct vring_virtqueue *vq, unsigned int head)
 {
@@ -398,7 +394,7 @@
 }
 
 /**
- * virtqueue_get_buf - get the next used buffer
+ * vring_get_buf - get the next used buffer
  * @vq: the struct virtqueue we're talking about.
  * @len: the length written into the buffer
  *
@@ -411,9 +407,9 @@
  * operations at the same time (except where noted).
  *
  * Returns NULL if there are no used buffers, or the "data" token
- * handed to virtqueue_add_buf().
+ * handed to vring_add_buf().
  */
-void *virtqueue_get_buf(struct virtqueue *_vq, unsigned int *len)
+static void *vring_get_buf(struct virtqueue *_vq, unsigned int *len)
 {
 	struct vring_virtqueue *vq = to_vvq(_vq);
 	void *ret;
@@ -468,10 +464,9 @@
 	END_USE(vq);
 	return ret;
 }
-EXPORT_SYMBOL_GPL(virtqueue_get_buf);
 
 /**
- * virtqueue_disable_cb - disable callbacks
+ * vring_disable_cb - disable callbacks
  * @vq: the struct virtqueue we're talking about.
  *
  * Note that this is not necessarily synchronous, hence unreliable and only
@@ -479,16 +474,15 @@
  *
  * Unlike other operations, this need not be serialized.
  */
-void virtqueue_disable_cb(struct virtqueue *_vq)
+static void vring_disable_cb(struct virtqueue *_vq)
 {
 	struct vring_virtqueue *vq = to_vvq(_vq);
 
 	vq->vring.avail->flags |= VRING_AVAIL_F_NO_INTERRUPT;
 }
-EXPORT_SYMBOL_GPL(virtqueue_disable_cb);
 
 /**
- * virtqueue_enable_cb - restart callbacks after disable_cb.
+ * vring_enable_cb - restart callbacks after disable_cb.
  * @vq: the struct virtqueue we're talking about.
  *
  * This re-enables callbacks; it returns "false" if there are pending
@@ -498,7 +492,7 @@
  * Caller must ensure we don't call this with other virtqueue
  * operations at the same time (except where noted).
  */
-bool virtqueue_enable_cb(struct virtqueue *_vq)
+static bool vring_enable_cb(struct virtqueue *_vq)
 {
 	struct vring_virtqueue *vq = to_vvq(_vq);
 
@@ -520,10 +514,9 @@
 	END_USE(vq);
 	return true;
 }
-EXPORT_SYMBOL_GPL(virtqueue_enable_cb);
 
 /**
- * virtqueue_enable_cb_delayed - restart callbacks after disable_cb.
+ * vring_enable_cb_delayed - restart callbacks after disable_cb.
  * @vq: the struct virtqueue we're talking about.
  *
  * This re-enables callbacks but hints to the other side to delay
@@ -535,7 +528,7 @@
  * Caller must ensure we don't call this with other virtqueue
  * operations at the same time (except where noted).
  */
-bool virtqueue_enable_cb_delayed(struct virtqueue *_vq)
+static bool vring_enable_cb_delayed(struct virtqueue *_vq)
 {
 	struct vring_virtqueue *vq = to_vvq(_vq);
 	u16 bufs;
@@ -560,17 +553,16 @@
 	END_USE(vq);
 	return true;
 }
-EXPORT_SYMBOL_GPL(virtqueue_enable_cb_delayed);
 
 /**
- * virtqueue_detach_unused_buf - detach first unused buffer
+ * vring_detach_unused_buf - detach first unused buffer
  * @vq: the struct virtqueue we're talking about.
  *
- * Returns NULL or the "data" token handed to virtqueue_add_buf().
+ * Returns NULL or the "data" token handed to vring_add_buf().
  * This is not valid on an active queue; it is useful only for device
  * shutdown.
  */
-void *virtqueue_detach_unused_buf(struct virtqueue *_vq)
+static void *vring_detach_unused_buf(struct virtqueue *_vq)
 {
 	struct vring_virtqueue *vq = to_vvq(_vq);
 	unsigned int i;
@@ -594,7 +586,6 @@
 	END_USE(vq);
 	return NULL;
 }
-EXPORT_SYMBOL_GPL(virtqueue_detach_unused_buf);
 
 irqreturn_t vring_interrupt(int irq, void *_vq)
 {
@@ -616,6 +607,34 @@
 }
 EXPORT_SYMBOL_GPL(vring_interrupt);
 
+/**
+ * get_vring_size - return the size of the virtqueue's vring
+ * @vq: the struct virtqueue containing the vring of interest.
+ *
+ * Returns the size of the vring.  This is mainly used for boasting to
+ * userspace.  Unlike other operations, this need not be serialized.
+ */
+static unsigned int get_vring_size(struct virtqueue *_vq)
+{
+
+	struct vring_virtqueue *vq = to_vvq(_vq);
+
+	return vq->vring.num;
+}
+
+static struct virtqueue_ops vring_vq_ops = {
+	.add_buf = vring_add_buf,
+	.get_buf = vring_get_buf,
+	.kick = vring_kick,
+	.kick_prepare = vring_kick_prepare,
+	.kick_notify = vring_kick_notify,
+	.disable_cb = vring_disable_cb,
+	.enable_cb = vring_enable_cb,
+	.enable_cb_delayed = vring_enable_cb_delayed,
+	.detach_unused_buf = vring_detach_unused_buf,
+	.get_impl_size = get_vring_size,
+};
+
 struct virtqueue *vring_new_virtqueue(unsigned int num,
 				      unsigned int vring_align,
 				      struct virtio_device *vdev,
@@ -641,6 +660,7 @@
 	vring_init(&vq->vring, num, pages, vring_align);
 	vq->vq.callback = callback;
 	vq->vq.vdev = vdev;
+	vq->vq.vq_ops = &vring_vq_ops;
 	vq->vq.name = name;
 	vq->notify = notify;
 	vq->weak_barriers = weak_barriers;
@@ -699,20 +719,4 @@
 }
 EXPORT_SYMBOL_GPL(vring_transport_features);
 
-/**
- * virtqueue_get_vring_size - return the size of the virtqueue's vring
- * @vq: the struct virtqueue containing the vring of interest.
- *
- * Returns the size of the vring.  This is mainly used for boasting to
- * userspace.  Unlike other operations, this need not be serialized.
- */
-unsigned int virtqueue_get_vring_size(struct virtqueue *_vq)
-{
-
-	struct vring_virtqueue *vq = to_vvq(_vq);
-
-	return vq->vring.num;
-}
-EXPORT_SYMBOL_GPL(virtqueue_get_vring_size);
-
 MODULE_LICENSE("GPL");
diff --git a/fs/yaffs2/yaffs_vfs.c b/fs/yaffs2/yaffs_vfs.c
index 8e8c55b..4dd618f 100644
--- a/fs/yaffs2/yaffs_vfs.c
+++ b/fs/yaffs2/yaffs_vfs.c
@@ -497,8 +497,16 @@
 
 	if (ret_val == YAFFS_OK) {
 		if (target) {
-			drop_nlink(new_dentry->d_inode);
-			mark_inode_dirty(new_dentry->d_inode);
+			/*
+			 * We already validated target's inode
+			 * earlier; if that is no longer the
+			 * case, emit a warning.
+			 */
+			WARN_ON(!new_dentry->d_inode);
+			if (new_dentry->d_inode) {
+				drop_nlink(new_dentry->d_inode);
+				mark_inode_dirty(new_dentry->d_inode);
+			}
 		}
 
 		update_dir_time(old_dir);
diff --git a/include/linux/diagchar.h b/include/linux/diagchar.h
index fbffdd2..7769950 100644
--- a/include/linux/diagchar.h
+++ b/include/linux/diagchar.h
@@ -24,6 +24,7 @@
 #define MEMORY_DEVICE_MODE		2
 #define NO_LOGGING_MODE			3
 #define UART_MODE			4
+#define SOCKET_MODE			5
 
 /* different values that go in for diag_data_type */
 #define DATA_TYPE_EVENT         	0
diff --git a/include/linux/mfd/pm8xxx/pm8921-charger.h b/include/linux/mfd/pm8xxx/pm8921-charger.h
index fca8700..b00e050 100644
--- a/include/linux/mfd/pm8xxx/pm8921-charger.h
+++ b/include/linux/mfd/pm8xxx/pm8921-charger.h
@@ -89,6 +89,10 @@
  * @get_batt_capacity_percent:
  *			a board specific function to return battery
  *			capacity. If null - a default one will be used
+ * @dc_unplug_check:	enables the reverse-boost fix for the DC_IN line.
+ *			This should only be enabled on devices that control
+ *			the DC OVP FETs; on all other devices the option
+ *			should remain disabled.
  * @trkl_voltage:	the trkl voltage in (mV) below which hw controlled
  *			 trkl charging happens with linear charger
  * @weak_voltage:	the weak voltage (mV) below which hw controlled
@@ -137,6 +141,7 @@
 	int64_t				batt_id_min;
 	int64_t				batt_id_max;
 	bool				keep_btm_on_suspend;
+	bool				dc_unplug_check;
 	int				trkl_voltage;
 	int				weak_voltage;
 	int				trkl_current;
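dc_unplug_check reaches the driver through board platform data. A hedged example of a board opting in; the struct and the other values are placeholders for whatever the board already defines:

static struct pm8921_charger_platform_data board_chg_pdata = {
	/* ... existing charger settings elided ... */
	.keep_btm_on_suspend	= true,
	/*
	 * Only set on boards that control the DC OVP FETs; otherwise
	 * leave the reverse-boost fix disabled (false).
	 */
	.dc_unplug_check	= true,
	.trkl_voltage		= 3000,		/* mV, placeholder */
	.weak_voltage		= 3200,		/* mV, placeholder */
};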
diff --git a/include/linux/mfd/wcd9xxx/wcd9xxx-slimslave.h b/include/linux/mfd/wcd9xxx/wcd9xxx-slimslave.h
index 93c21ce..9619527 100644
--- a/include/linux/mfd/wcd9xxx/wcd9xxx-slimslave.h
+++ b/include/linux/mfd/wcd9xxx/wcd9xxx-slimslave.h
@@ -16,10 +16,6 @@
 #include <linux/slimbus/slimbus.h>
 #include <linux/mfd/wcd9xxx/core.h>
 
-/* Local to the core only */
-#define SLIM_MAX_RX_PORTS 7
-#define SLIM_MAX_TX_PORTS 10
-
 /* Channel numbers to be used for each port */
 enum {
 	SLIM_TX_1   = 128,
@@ -43,20 +39,43 @@
 };
 
 /*
- *  client is expected to give port ids in the range of 1-10 for Tx ports and
- *  1-7 for Rx ports, we need to add offset for getting the absolute slave
+ *  client is expected to give port ids in the range of
+ *  1-10 for pre-Taiko Tx ports and 1-16 for Taiko, and
+ *  1-7 for pre-Taiko Rx ports and 1-16 for Taiko;
+ *  we need to add an offset to get the absolute slave
  *  port id before configuring the HW
  */
-#define SB_PGD_MAX_NUMBER_OF_TX_SLAVE_DEV_PORTS 10
-#define SB_PGD_OFFSET_OF_TX_SLAVE_DEV_PORTS     -1
-#define SB_PGD_MAX_NUMBER_OF_RX_SLAVE_DEV_PORTS 7
-#define SB_PGD_OFFSET_OF_RX_SLAVE_DEV_PORTS     9
+#define TABLA_SB_PGD_MAX_NUMBER_OF_TX_SLAVE_DEV_PORTS 10
+#define TAIKO_SB_PGD_MAX_NUMBER_OF_TX_SLAVE_DEV_PORTS 16
+
+#define SLIM_MAX_TX_PORTS TAIKO_SB_PGD_MAX_NUMBER_OF_TX_SLAVE_DEV_PORTS
+
+#define TABLA_SB_PGD_OFFSET_OF_RX_SLAVE_DEV_PORTS \
+	TABLA_SB_PGD_MAX_NUMBER_OF_TX_SLAVE_DEV_PORTS
+#define TAIKO_SB_PGD_OFFSET_OF_RX_SLAVE_DEV_PORTS \
+	TAIKO_SB_PGD_MAX_NUMBER_OF_TX_SLAVE_DEV_PORTS
+
+#define TABLA_SB_PGD_MAX_NUMBER_OF_RX_SLAVE_DEV_PORTS 7
+#define TAIKO_SB_PGD_MAX_NUMBER_OF_RX_SLAVE_DEV_PORTS 13
+
+#define SLIM_MAX_RX_PORTS TAIKO_SB_PGD_MAX_NUMBER_OF_RX_SLAVE_DEV_PORTS
+
+#define TABLA_SB_PGD_RX_PORT_MULTI_CHANNEL_0_START_PORT_ID \
+	TABLA_SB_PGD_OFFSET_OF_RX_SLAVE_DEV_PORTS
+#define TAIKO_SB_PGD_RX_PORT_MULTI_CHANNEL_0_START_PORT_ID \
+	TAIKO_SB_PGD_OFFSET_OF_RX_SLAVE_DEV_PORTS
+
+#define TABLA_SB_PGD_RX_PORT_MULTI_CHANNEL_0_END_PORT_ID 16
+#define TAIKO_SB_PGD_RX_PORT_MULTI_CHANNEL_0_END_PORT_ID 31
+
+#define TABLA_SB_PGD_TX_PORT_MULTI_CHANNEL_1_END_PORT_ID 9
+#define TAIKO_SB_PGD_TX_PORT_MULTI_CHANNEL_1_END_PORT_ID 15
 
 /* below details are taken from SLIMBUS slave SWI */
 #define SB_PGD_PORT_BASE 0x000
 
-#define SB_PGD_PORT_CFG_BYTE_ADDR(port_num) \
-		(SB_PGD_PORT_BASE + 0x040 + 1*port_num)
+#define SB_PGD_PORT_CFG_BYTE_ADDR(offset, port_num) \
+		(SB_PGD_PORT_BASE + offset + (1 * port_num))
 
 #define SB_PGD_TX_PORT_MULTI_CHANNEL_0(port_num) \
 		(SB_PGD_PORT_BASE + 0x100 + 4*port_num)
@@ -66,12 +85,9 @@
 #define SB_PGD_TX_PORT_MULTI_CHANNEL_1(port_num) \
 		(SB_PGD_PORT_BASE + 0x101 + 4*port_num)
 #define SB_PGD_TX_PORT_MULTI_CHANNEL_1_START_PORT_ID   8
-#define SB_PGD_TX_PORT_MULTI_CHANNEL_1_END_PORT_ID     9
 
-#define SB_PGD_RX_PORT_MULTI_CHANNEL_0(port_num) \
-		(SB_PGD_PORT_BASE + 0x180 + 4*port_num)
-#define SB_PGD_RX_PORT_MULTI_CHANNEL_0_START_PORT_ID   10
-#define SB_PGD_RX_PORT_MULTI_CHANNEL_0_END_PORT_ID     16
+#define SB_PGD_RX_PORT_MULTI_CHANNEL_0(offset, port_num) \
+		(SB_PGD_PORT_BASE + offset + (4 * port_num))
 
 /* slave port water mark level
  *   (0: 6bytes, 1: 9bytes, 2: 12 bytes, 3: 15 bytes)
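With the Tabla/Taiko limits split out, the absolute slave port id and the per-port register address are derived from the client's 1-based port id plus the codec-specific offset. A minimal sketch for a Taiko Rx port; the 0x040 config offset is an assumption carried over from the macro this patch parameterized:

/* Client passes Rx port ids 1..SLIM_MAX_RX_PORTS (1..13 on Taiko). */
static u16 taiko_rx_port_cfg_reg(unsigned int client_port_id)
{
	unsigned int slave_port_id = client_port_id +
		TAIKO_SB_PGD_OFFSET_OF_RX_SLAVE_DEV_PORTS;

	/* 0x040 was the hard-coded offset before this change */
	return SB_PGD_PORT_CFG_BYTE_ADDR(0x040, slave_port_id);
}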
diff --git a/include/linux/msm_mdp.h b/include/linux/msm_mdp.h
index 4c42623..2519a6e 100644
--- a/include/linux/msm_mdp.h
+++ b/include/linux/msm_mdp.h
@@ -68,10 +68,8 @@
 						struct msmfb_data)
 #define MSMFB_WRITEBACK_TERMINATE _IO(MSMFB_IOCTL_MAGIC, 155)
 #define MSMFB_MDP_PP _IOWR(MSMFB_IOCTL_MAGIC, 156, struct msmfb_mdp_pp)
-
-#define MSMFB_OVERLAY_VSYNC_CTRL  _IOW(MSMFB_IOCTL_MAGIC, 160, unsigned int)
-
-
+#define MSMFB_OVERLAY_VSYNC_CTRL _IOW(MSMFB_IOCTL_MAGIC, 160, unsigned int)
+#define MSMFB_VSYNC_CTRL  _IOW(MSMFB_IOCTL_MAGIC, 161, unsigned int)
 #define FB_TYPE_3D_PANEL 0x10101010
 #define MDP_IMGTYPE2_START 0x10000
 #define MSMFB_DRIVER_VERSION	0xF9E8D701
diff --git a/include/linux/power_supply.h b/include/linux/power_supply.h
index 32d8ec2..cb56293 100644
--- a/include/linux/power_supply.h
+++ b/include/linux/power_supply.h
@@ -210,6 +210,7 @@
 	int use_for_apm;
 };
 
+#if defined(CONFIG_POWER_SUPPLY) || defined(CONFIG_POWER_SUPPLY_MODULE)
 extern struct power_supply *power_supply_get_by_name(char *name);
 extern void power_supply_changed(struct power_supply *psy);
 extern int power_supply_am_i_supplied(struct power_supply *psy);
@@ -218,10 +219,31 @@
 extern int power_supply_set_online(struct power_supply *psy, bool enable);
 extern int power_supply_set_scope(struct power_supply *psy, int scope);
 extern int power_supply_set_charge_type(struct power_supply *psy, int type);
-
-#if defined(CONFIG_POWER_SUPPLY) || defined(CONFIG_POWER_SUPPLY_MODULE)
+extern int power_supply_set_supply_type(struct power_supply *psy,
+					enum power_supply_type supply_type);
 extern int power_supply_is_system_supplied(void);
 #else
+static inline struct power_supply *power_supply_get_by_name(char *name)
+							{ return NULL; }
+static inline int power_supply_am_i_supplied(struct power_supply *psy)
+							{ return -ENOSYS; }
+static inline int power_supply_set_battery_charged(struct power_supply *psy)
+							{ return -ENOSYS; }
+static inline int power_supply_set_current_limit(struct power_supply *psy,
+							int limit)
+							{ return -ENOSYS; }
+static inline int power_supply_set_online(struct power_supply *psy,
+							bool enable)
+							{ return -ENOSYS; }
+static inline int power_supply_set_scope(struct power_supply *psy,
+							int scope)
+							{ return -ENOSYS; }
+static inline int power_supply_set_charge_type(struct power_supply *psy,
+							int type)
+							{ return -ENOSYS; }
+static inline int power_supply_set_supply_type(struct power_supply *psy,
+					enum power_supply_type supply_type)
+							{ return -ENOSYS; }
 static inline int power_supply_is_system_supplied(void) { return -ENOSYS; }
 #endif
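A sketch of a charger or USB driver using the new setter, with the stubs above covering builds where power-supply support is compiled out; the "usb" supply name and the DCP type are illustrative:

#include <linux/kernel.h>
#include <linux/power_supply.h>

static int example_report_charger_type(void)
{
	struct power_supply *usb_psy = power_supply_get_by_name("usb");
	int rc;

	if (!usb_psy)
		return -ENODEV;

	/* stubbed to return -ENOSYS when power-supply support is off */
	rc = power_supply_set_supply_type(usb_psy, POWER_SUPPLY_TYPE_USB_DCP);
	if (rc)
		pr_warn("%s: set_supply_type failed: %d\n", __func__, rc);
	return rc;
}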
 
diff --git a/include/linux/qpnp/pwm.h b/include/linux/qpnp/pwm.h
index de89a37..50c15e9 100644
--- a/include/linux/qpnp/pwm.h
+++ b/include/linux/qpnp/pwm.h
@@ -114,6 +114,18 @@
 int pwm_config_pwm_value(struct pwm_device *pwm, int pwm_value);
 
 /*
+ * enum pm_pwm_mode - PWM mode selection
+ * %PM_PWM_MODE_PWM - Select PWM mode
+ * %PM_PWM_MODE_LPG - Select LPG mode
+ */
+enum pm_pwm_mode {
+	PM_PWM_MODE_PWM,
+	PM_PWM_MODE_LPG,
+};
+
+int pwm_change_mode(struct pwm_device *pwm, enum pm_pwm_mode mode);
+
+/*
  * lut_params: Lookup table (LUT) parameters
  * @start_idx: start index in lookup table from 0 to MAX-1
  * @idx_len: number of index
@@ -134,8 +146,6 @@
 int pwm_lut_config(struct pwm_device *pwm, int period_us,
 		int duty_pct[], struct lut_params lut_params);
 
-int pwm_lut_enable(struct pwm_device *pwm, int start);
-
 /* Standard APIs supported */
 /*
  * pwm_request - request a PWM device
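With pwm_lut_enable() removed, a LUT ramp is started by configuring the table and then switching the channel into LPG mode. A hedged usage sketch; the duty table, period and the lut_params fields shown are placeholders, remaining fields are left zeroed:

#include <linux/kernel.h>
#include <linux/qpnp/pwm.h>

static int example_start_led_ramp(struct pwm_device *pwm)
{
	static int duty_pct[] = { 10, 30, 60, 100 };
	struct lut_params lp = {
		.start_idx	= 0,
		.idx_len	= ARRAY_SIZE(duty_pct),
	};
	int rc;

	rc = pwm_lut_config(pwm, 1000000 /* period_us, placeholder */,
			    duty_pct, lp);
	if (rc)
		return rc;

	/* select LPG mode so the LUT ramp, not a fixed duty, drives the pin */
	return pwm_change_mode(pwm, PM_PWM_MODE_LPG);
}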
diff --git a/include/linux/usb/msm_hsusb.h b/include/linux/usb/msm_hsusb.h
index 3b1d06d..b9ecd60 100644
--- a/include/linux/usb/msm_hsusb.h
+++ b/include/linux/usb/msm_hsusb.h
@@ -376,6 +376,7 @@
 
 struct msm_usb_host_platform_data {
 	unsigned int power_budget;
+	int pmic_gpio_dp_irq;
 	unsigned int dock_connect_irq;
 };
 
@@ -389,21 +390,46 @@
 	bool core_clk_always_on_workaround;
 };
 
+/**
+ * struct usb_bam_pipe_connect: pipe connection information
+ * between USB/HSIC BAM and another BAM. USB/HSIC BAM can be
+ * either src BAM or dst BAM
+ * @src_phy_addr: src bam physical address.
+ * @src_pipe_index: src bam pipe index.
+ * @dst_phy_addr: dst bam physical address.
+ * @dst_pipe_index: dst bam pipe index.
+ * @data_fifo_base_offset: data fifo offset.
+ * @data_fifo_size: data fifo size.
+ * @desc_fifo_base_offset: descriptor fifo offset.
+ * @desc_fifo_size: descriptor fifo size.
+ */
 struct usb_bam_pipe_connect {
 	u32 src_phy_addr;
-	int src_pipe_index;
+	u32 src_pipe_index;
 	u32 dst_phy_addr;
-	int dst_pipe_index;
+	u32 dst_pipe_index;
 	u32 data_fifo_base_offset;
 	u32 data_fifo_size;
 	u32 desc_fifo_base_offset;
 	u32 desc_fifo_size;
 };
 
+/**
+ * struct msm_usb_bam_platform_data: pipe connection information
+ * between USB/HSIC BAM and another BAM. USB/HSIC BAM can be
+ * either src BAM or dst BAM
+ * @connections: holds all pipe connections data.
+ * @usb_active_bam: set USB or HSIC as the active BAM.
+ * @usb_bam_num_pipes: max number of pipes to use.
+ * @total_bam_num: total number of BAMs described (e.g. USB and HSIC).
+ * @usb_base_address: BAM physical address.
+ */
 struct msm_usb_bam_platform_data {
 	struct usb_bam_pipe_connect *connections;
 	int usb_active_bam;
 	int usb_bam_num_pipes;
+	u32 total_bam_num;
+	u32 usb_base_address;
 };
 
 enum usb_bam {
@@ -411,8 +437,27 @@
 	HSIC_BAM,
 };
 
+#ifdef CONFIG_USB_DWC3_MSM
 int msm_ep_config(struct usb_ep *ep);
 int msm_ep_unconfig(struct usb_ep *ep);
-int msm_data_fifo_config(struct usb_ep *ep, u32 addr, u32 size);
+int msm_data_fifo_config(struct usb_ep *ep, u32 addr, u32 size,
+	u8 dst_pipe_idx);
 
+#else
+static inline int msm_data_fifo_config(struct usb_ep *ep, u32 addr, u32 size,
+	u8 dst_pipe_idx)
+{
+	return -ENODEV;
+}
+
+static inline int msm_ep_config(struct usb_ep *ep)
+{
+	return -ENODEV;
+}
+
+static inline int msm_ep_unconfig(struct usb_ep *ep)
+{
+	return -ENODEV;
+}
+#endif
 #endif
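The expanded usb_bam_pipe_connect and msm_usb_bam_platform_data are populated by board files. A hedged sketch of one pipe connection plus the platform data that carries it; every address, index and size below is a placeholder, and the HSUSB_BAM enumerator name is assumed from enum usb_bam above:

#include <linux/usb/msm_hsusb.h>

static struct usb_bam_pipe_connect example_pipe_connects[] = {
	{
		/* USB BAM as source, peer BAM as destination */
		.src_phy_addr		= 0x12502000,	/* placeholder */
		.src_pipe_index		= 1,
		.dst_phy_addr		= 0x12244000,	/* placeholder */
		.dst_pipe_index		= 6,
		.data_fifo_base_offset	= 0x2200,
		.data_fifo_size		= 0x1e00,
		.desc_fifo_base_offset	= 0x2000,
		.desc_fifo_size		= 0x200,
	},
};

static struct msm_usb_bam_platform_data example_usb_bam_pdata = {
	.connections		= example_pipe_connects,
	.usb_active_bam		= HSUSB_BAM,	/* enumerator name assumed */
	.usb_bam_num_pipes	= 16,		/* placeholder */
	.total_bam_num		= 2,		/* placeholder */
	.usb_base_address	= 0x12500000,	/* placeholder */
};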
diff --git a/include/linux/virtio.h b/include/linux/virtio.h
index 8efd28a..0d0f6d3 100644
--- a/include/linux/virtio.h
+++ b/include/linux/virtio.h
@@ -15,6 +15,7 @@
  * @callback: the function to call when buffers are consumed (can be NULL).
  * @name: the name of this virtqueue (mainly for debugging)
  * @vdev: the virtio device this queue was created for.
+ * @vq_ops: the operations for this virtqueue (see below).
  * @priv: a pointer for the virtqueue implementation to use.
  */
 struct virtqueue {
@@ -22,33 +23,235 @@
 	void (*callback)(struct virtqueue *vq);
 	const char *name;
 	struct virtio_device *vdev;
+	struct virtqueue_ops *vq_ops;
 	void *priv;
 };
 
-int virtqueue_add_buf(struct virtqueue *vq,
-		      struct scatterlist sg[],
-		      unsigned int out_num,
-		      unsigned int in_num,
-		      void *data,
-		      gfp_t gfp);
+/**
+ * virtqueue_ops - operations for virtqueue abstraction layer
+ * @add_buf: expose buffer to other end
+ *	vq: the struct virtqueue we're talking about.
+ *	sg: the description of the buffer(s).
+ *	out_num: the number of sg readable by other side
+ *	in_num: the number of sg which are writable (after readable ones)
+ *	data: the token identifying the buffer.
+ *	Returns remaining capacity of queue (sg segments) or a negative error.
+ * @kick: update after add_buf
+ *	vq: the struct virtqueue
+ *	After one or more add_buf calls, invoke this to kick the other side.
+ * @get_buf: get the next used buffer
+ *	vq: the struct virtqueue we're talking about.
+ *	len: the length written into the buffer
+ *	Returns NULL or the "data" token handed to add_buf.
+ * @disable_cb: disable callbacks
+ *	vq: the struct virtqueue we're talking about.
+ *	Note that this is not necessarily synchronous, hence unreliable and only
+ *	useful as an optimization.
+ * @enable_cb: restart callbacks after disable_cb.
+ *	vq: the struct virtqueue we're talking about.
+ *	This re-enables callbacks; it returns "false" if there are pending
+ *	buffers in the queue, to detect a possible race between the driver
+ *	checking for more work, and enabling callbacks.
+ * @enable_cb_delayed: restart callbacks after disable_cb.
+ *	vq: the struct virtqueue we're talking about.
+ *	This re-enables callbacks but hints to the other side to delay
+ *	interrupts until most of the available buffers have been processed;
+ *	it returns "false" if there are many pending buffers in the queue,
+ *	to detect a possible race between the driver checking for more work,
+ *	and enabling callbacks.
+ *	Caller must ensure we don't call this with other virtqueue
+ *	operations at the same time (except where noted).
+ * @detach_unused_buf: detach first unused buffer
+ *	vq: the struct virtqueue we're talking about.
+ *	Returns NULL or the "data" token handed to add_buf
+ * @get_impl_size: return the size of the virtqueue's implementation
+ *	vq: the struct virtqueue containing the implementation of interest.
+ *	Returns the size of the implementation. This is mainly used for
+ *	boasting to userspace. Unlike other operations, this need not
+ *	be serialized.
+ *
+ * Locking rules are straightforward: the driver is responsible for
+ * locking.  No two operations may be invoked simultaneously, with the exception
+ * of @disable_cb.
+ *
+ * All operations can be called in any context.
+ */
+struct virtqueue_ops {
+	int (*add_buf)(struct virtqueue *vq,
+		       struct scatterlist sg[],
+		       unsigned int out_num,
+		       unsigned int in_num,
+		       void *data,
+		       gfp_t gfp);
 
-void virtqueue_kick(struct virtqueue *vq);
+	void (*kick)(struct virtqueue *vq);
+	bool (*kick_prepare)(struct virtqueue *vq);
+	void (*kick_notify)(struct virtqueue *vq);
+	void *(*get_buf)(struct virtqueue *vq, unsigned int *len);
+	void (*disable_cb)(struct virtqueue *vq);
+	bool (*enable_cb)(struct virtqueue *vq);
+	bool (*enable_cb_delayed)(struct virtqueue *vq);
+	void *(*detach_unused_buf)(struct virtqueue *vq);
+	unsigned int (*get_impl_size)(struct virtqueue *vq);
+};
 
-bool virtqueue_kick_prepare(struct virtqueue *vq);
+/**
+ * virtqueue_add_buf - expose buffer to other end
+ * @vq: the struct virtqueue we're talking about.
+ * @sg: the description of the buffer(s).
+ * @out_num: the number of sg readable by other side
+ * @in_num: the number of sg which are writable (after readable ones)
+ * @data: the token identifying the buffer.
+ * @gfp: how to do memory allocations (if necessary).
+ *
+ * Caller must ensure we don't call this with other virtqueue operations
+ * at the same time (except where noted).
+ *
+ * Returns remaining capacity of queue or a negative error.
+ */
+static inline int virtqueue_add_buf(struct virtqueue *vq,
+				struct scatterlist sg[],
+				unsigned int out_num,
+				unsigned int in_num,
+				void *data,
+				gfp_t gfp)
+{
+	return vq->vq_ops->add_buf(vq, sg, out_num, in_num, data, gfp);
+}
+/**
+ * virtqueue_kick - update after add_buf
+ * @vq: the struct virtqueue
+ *
+ * After one or more virtqueue_add_buf calls, invoke this to kick
+ * the other side.
+ *
+ * Caller must ensure we don't call this with other virtqueue
+ * operations at the same time (except where noted).
+ */
+static inline void virtqueue_kick(struct virtqueue *vq)
+{
+	vq->vq_ops->kick(vq);
+}
 
-void virtqueue_notify(struct virtqueue *vq);
+/**
+ * virtqueue_kick_prepare - first half of split virtqueue_kick call.
+ * @vq: the struct virtqueue
+ *
+ * Instead of virtqueue_kick(), you can do:
+ *	if (virtqueue_kick_prepare(vq))
+ *		virtqueue_kick_notify(vq);
+ *
+ * This is sometimes useful because the virtqueue_kick_prepare() needs
+ * to be serialized, but the actual virtqueue_kick_notify() call does not.
+ */
+static inline bool virtqueue_kick_prepare(struct virtqueue *vq)
+{
+	return vq->vq_ops->kick_prepare(vq);
+}
 
-void *virtqueue_get_buf(struct virtqueue *vq, unsigned int *len);
+/**
+ * virtqueue_kick_notify - second half of split virtqueue_kick call.
+ * @vq: the struct virtqueue
+ */
+static inline void virtqueue_kick_notify(struct virtqueue *vq)
+{
+	vq->vq_ops->kick_notify(vq);
+}
 
-void virtqueue_disable_cb(struct virtqueue *vq);
+/**
+ * virtqueue_get_buf - get the next used buffer
+ * @vq: the struct virtqueue we're talking about.
+ * @len: the length written into the buffer
+ *
+ * If the driver wrote data into the buffer, @len will be set to the
+ * amount written.  This means you don't need to clear the buffer
+ * beforehand to ensure there's no data leakage in the case of short
+ * writes.
+ *
+ * Caller must ensure we don't call this with other virtqueue
+ * operations at the same time (except where noted).
+ *
+ * Returns NULL if there are no used buffers, or the "data" token
+ * handed to virtqueue_add_buf().
+ */
+static inline void *virtqueue_get_buf(struct virtqueue *vq, unsigned int *len)
+{
+	return vq->vq_ops->get_buf(vq, len);
+}
 
-bool virtqueue_enable_cb(struct virtqueue *vq);
+/**
+ * virtqueue_disable_cb - disable callbacks
+ * @vq: the struct virtqueue we're talking about.
+ *
+ * Note that this is not necessarily synchronous, hence unreliable and only
+ * useful as an optimization.
+ *
+ * Unlike other operations, this need not be serialized.
+ */
+static inline void virtqueue_disable_cb(struct virtqueue *vq)
+{
+	vq->vq_ops->disable_cb(vq);
+}
 
-bool virtqueue_enable_cb_delayed(struct virtqueue *vq);
+/**
+ * virtqueue_enable_cb - restart callbacks after disable_cb.
+ * @vq: the struct virtqueue we're talking about.
+ *
+ * This re-enables callbacks; it returns "false" if there are pending
+ * buffers in the queue, to detect a possible race between the driver
+ * checking for more work, and enabling callbacks.
+ *
+ * Caller must ensure we don't call this with other virtqueue
+ * operations at the same time (except where noted).
+ */
+static inline bool virtqueue_enable_cb(struct virtqueue *vq)
+{
+	return vq->vq_ops->enable_cb(vq);
+}
 
-void *virtqueue_detach_unused_buf(struct virtqueue *vq);
+/**
+ * virtqueue_enable_cb_delayed - restart callbacks after disable_cb.
+ * @vq: the struct virtqueue we're talking about.
+ *
+ * This re-enables callbacks but hints to the other side to delay
+ * interrupts until most of the available buffers have been processed;
+ * it returns "false" if there are many pending buffers in the queue,
+ * to detect a possible race between the driver checking for more work,
+ * and enabling callbacks.
+ *
+ * Caller must ensure we don't call this with other virtqueue
+ * operations at the same time (except where noted).
+ */
+static inline bool virtqueue_enable_cb_delayed(struct virtqueue *vq)
+{
+	return vq->vq_ops->enable_cb_delayed(vq);
+}
 
-unsigned int virtqueue_get_vring_size(struct virtqueue *vq);
+/**
+ * virtqueue_detach_unused_buf - detach first unused buffer
+ * @vq: the struct virtqueue we're talking about.
+ *
+ * Returns NULL or the "data" token handed to virtqueue_add_buf().
+ * This is not valid on an active queue; it is useful only for device
+ * shutdown.
+ */
+static inline void *virtqueue_detach_unused_buf(struct virtqueue *vq)
+{
+	return vq->vq_ops->detach_unused_buf(vq);
+}
+
+/**
+ * virtqueue_get_impl_size - return the size of the virtqueue's implementation
+ * @vq: the struct virtqueue containing the implementation of interest.
+ *
+ * Returns the size of the virtqueue implementation.  This is mainly used
+ * for boasting to userspace.  Unlike other operations, this need not
+ * be serialized.
+ */
+static inline unsigned int virtqueue_get_impl_size(struct virtqueue *vq)
+{
+	return vq->vq_ops->get_impl_size(vq);
+}
 
 /**
  * virtio_device - representation of a device using virtio
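Callers are unchanged -- virtqueue_add_buf(), virtqueue_kick() and friends now simply dispatch through vq->vq_ops -- so an alternative transport can plug in its own operations the same way vring_new_virtqueue() installs vring_vq_ops. A minimal sketch of such an implementation; all myvq_* names are illustrative, and every op the core calls must be non-NULL:

#include <linux/virtio.h>
#include <linux/scatterlist.h>

static int myvq_add_buf(struct virtqueue *vq, struct scatterlist sg[],
			unsigned int out, unsigned int in,
			void *data, gfp_t gfp)
{
	/* queue the buffer on the implementation-specific ring */
	return 0;	/* remaining capacity, or a negative error */
}

static void myvq_kick(struct virtqueue *vq)
{
	/* notify the other side that new buffers are available */
}

static struct virtqueue_ops myvq_ops = {
	.add_buf	= myvq_add_buf,
	.kick		= myvq_kick,
	/* get_buf, disable_cb, enable_cb, ... elided for brevity */
};

static void myvq_setup(struct virtqueue *vq)
{
	vq->vq_ops = &myvq_ops;	/* virtqueue_add_buf() now calls myvq_add_buf() */
}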
diff --git a/include/linux/wcnss_wlan.h b/include/linux/wcnss_wlan.h
index 46a5b1b..295be8f 100644
--- a/include/linux/wcnss_wlan.h
+++ b/include/linux/wcnss_wlan.h
@@ -48,6 +48,8 @@
 int free_riva_power_on_lock(char *driver_name);
 unsigned int wcnss_get_serial_number(void);
 void wcnss_flush_delayed_boot_votes(void);
+void wcnss_allow_suspend(void);
+void wcnss_prevent_suspend(void);
 #define wcnss_wlan_get_drvdata(dev) dev_get_drvdata(dev)
 #define wcnss_wlan_set_drvdata(dev, data) dev_set_drvdata((dev), (data))
 
diff --git a/include/media/msm/vcd_api.h b/include/media/msm/vcd_api.h
index c93b696..7104028 100644
--- a/include/media/msm/vcd_api.h
+++ b/include/media/msm/vcd_api.h
@@ -55,8 +55,8 @@
 
 struct vcd_aspect_ratio {
 	u32 aspect_ratio;
-	u32 extended_par_width;
-	u32 extended_par_height;
+	u32 par_width;
+	u32 par_height;
 };
 
 struct vcd_frame_data {
diff --git a/include/media/msm_camera.h b/include/media/msm_camera.h
index e400375..11f7153 100644
--- a/include/media/msm_camera.h
+++ b/include/media/msm_camera.h
@@ -486,7 +486,8 @@
 #define CMD_STATS_BG_BUF_RELEASE 56
 #define CMD_STATS_BF_BUF_RELEASE 57
 #define CMD_STATS_BHIST_BUF_RELEASE 58
-
+#define CMD_VFE_SOF_COUNT_UPDATE 59
+#define CMD_VFE_COUNT_SOF_ENABLE 60
 
 #define CMD_AXI_CFG_PRIM               BIT(8)
 #define CMD_AXI_CFG_PRIM_ALL_CHNLS     BIT(9)
@@ -497,6 +498,15 @@
 
 #define CMD_AXI_START  0xE1
 #define CMD_AXI_STOP   0xE2
+#define CMD_AXI_RESET  0xE3
+
+
+#define AXI_CMD_PREVIEW      BIT(0)
+#define AXI_CMD_CAPTURE      BIT(1)
+#define AXI_CMD_RECORD       BIT(2)
+#define AXI_CMD_ZSL          BIT(3)
+#define AXI_CMD_RAW_CAPTURE  BIT(4)
+#define AXI_CMD_LIVESHOT     BIT(5)
 
 /* vfe config command: config command(from config thread)*/
 struct msm_vfe_cfg_cmd {
@@ -1749,6 +1759,15 @@
 	uint32_t len;
 };
 
+struct msm_camera_vfe_params_t {
+	uint32_t operation_mode;
+	uint32_t capture_count;
+	uint32_t skip_abort;
+	uint16_t port_info;
+	uint32_t inst_handle;
+	uint16_t cmd_type;
+};
+
 enum msm_camss_irq_idx {
 	CAMERA_SS_IRQ_0,
 	CAMERA_SS_IRQ_1,
diff --git a/include/media/msm_isp.h b/include/media/msm_isp.h
index ab9e70c..9fa5932 100644
--- a/include/media/msm_isp.h
+++ b/include/media/msm_isp.h
@@ -65,7 +65,9 @@
 #define MSG_ID_STATS_BG                 46
 #define MSG_ID_STATS_BF                 47
 #define MSG_ID_STATS_BHIST              48
-
+#define MSG_ID_RDI0_UPDATE_ACK          49
+#define MSG_ID_RDI1_UPDATE_ACK          50
+#define MSG_ID_RDI2_UPDATE_ACK          51
 
 /* ISP command IDs */
 #define VFE_CMD_DUMMY_0                                 0
@@ -218,6 +220,19 @@
 #define VFE_CMD_STATS_BHIST_START                       147
 #define VFE_CMD_STATS_BHIST_STOP                        148
 #define VFE_CMD_RESET_2                                 149
+#define VFE_CMD_FOV_ENC_CFG                             150
+#define VFE_CMD_FOV_VIEW_CFG                            151
+#define VFE_CMD_FOV_ENC_UPDATE                          152
+#define VFE_CMD_FOV_VIEW_UPDATE                         153
+#define VFE_CMD_SCALER_ENC_CFG                          154
+#define VFE_CMD_SCALER_VIEW_CFG                         155
+#define VFE_CMD_SCALER_ENC_UPDATE                       156
+#define VFE_CMD_SCALER_VIEW_UPDATE                      157
+#define VFE_CMD_COLORXFORM_ENC_CFG                      158
+#define VFE_CMD_COLORXFORM_VIEW_CFG                     159
+#define VFE_CMD_COLORXFORM_ENC_UPDATE                   160
+#define VFE_CMD_COLORXFORM_VIEW_UPDATE                  161
+#define VFE_CMD_TEST_GEN_CFG                            162
 
 struct msm_isp_cmd {
 	int32_t  id;
diff --git a/include/media/tavarua.h b/include/media/tavarua.h
index adbdada..d7b1340 100644
--- a/include/media/tavarua.h
+++ b/include/media/tavarua.h
@@ -176,6 +176,7 @@
 	V4L2_CID_PRIVATE_SPUR_FREQ_RMSSI,
 	V4L2_CID_PRIVATE_SPUR_SELECTION,
 	V4L2_CID_PRIVATE_UPDATE_SPUR_TABLE,
+	V4L2_CID_PRIVATE_VALID_CHANNEL,
 
 };
 
@@ -336,6 +337,16 @@
 	RDS_AF_JUMP,
 };
 
+/* Band limits */
+#define REGION_US_EU_BAND_LOW		87500
+#define REGION_US_EU_BAND_HIGH		108000
+#define REGION_JAPAN_STANDARD_BAND_LOW	76000
+#define REGION_JAPAN_STANDARD_BAND_HIGH	90000
+#define REGION_JAPAN_WIDE_BAND_LOW	90000
+#define REGION_JAPAN_WIDE_BAND_HIGH	108000
+#define MPX_DCC_BYPASS_REG		0x88C0
+#define MPX_DCC_DATA_REG		0x88C2
+
 enum audio_path {
 	FM_DIGITAL_PATH,
 	FM_ANALOG_PATH
@@ -534,6 +545,12 @@
 #define SPUR_TABLE_START_ADDR	(SPUR_TABLE_ADDR + 1)
 #define XFR_PEEK_COMPLETE	(XFR_PEEK_MODE | READ_COMPLETE)
 #define XFR_POKE_COMPLETE	(XFR_POKE_MODE)
+#define TUNE_MULT		(16)
+#define ADJ_CHANNEL_KHZ		(50)
+#define MPX_DCC_UPPER_LIMIT	(20000)
+#define MPX_DCC_LIMIT		(12566)
+#define INVALID_CHANNEL		(0)
+#define VALID_CHANNEL		(1)
 
 #define COMPUTE_SPUR(val)	((((val) - (76000)) / (50)))
 #define GET_FREQ(val, bit)	((bit == 1) ? ((val) >> 8) : ((val) & 0xFF))
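The new band-limit constants back the V4L2_CID_PRIVATE_VALID_CHANNEL check. A minimal sketch of the range test they enable; frequencies are in kHz, and TUNE_MULT suggests the driver's V4L2 frequency unit is 62.5 Hz, i.e. kHz = v4l2_freq / TUNE_MULT:

#include <linux/types.h>
#include <media/tavarua.h>

/* freq_khz is in kHz, matching the REGION_*_BAND_* limits above */
static bool tavarua_freq_in_band(unsigned int freq_khz,
				 unsigned int band_low,
				 unsigned int band_high)
{
	return freq_khz >= band_low && freq_khz <= band_high;
}

/*
 * e.g. tavarua_freq_in_band(98100, REGION_US_EU_BAND_LOW,
 *			     REGION_US_EU_BAND_HIGH) is true
 */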
diff --git a/include/trace/events/irq.h b/include/trace/events/irq.h
index 1c09820..0296174 100644
--- a/include/trace/events/irq.h
+++ b/include/trace/events/irq.h
@@ -43,14 +43,17 @@
 	TP_STRUCT__entry(
 		__field(	int,	irq		)
 		__string(	name,	action->name	)
+		__field(void*,	handler)
 	),
 
 	TP_fast_assign(
 		__entry->irq = irq;
 		__assign_str(name, action->name);
+		__entry->handler = action->handler;
 	),
 
-	TP_printk("irq=%d name=%s", __entry->irq, __get_str(name))
+	TP_printk("irq=%d name=%s handler=%pf",
+		 __entry->irq, __get_str(name), __entry->handler)
 );
 
 /**
diff --git a/include/trace/events/timer.h b/include/trace/events/timer.h
index 425bcfe..dd53c79 100644
--- a/include/trace/events/timer.h
+++ b/include/trace/events/timer.h
@@ -43,15 +43,17 @@
  */
 TRACE_EVENT(timer_start,
 
-	TP_PROTO(struct timer_list *timer, unsigned long expires),
+	TP_PROTO(struct timer_list *timer,
+		 unsigned long expires, char deferrable),
 
-	TP_ARGS(timer, expires),
+	TP_ARGS(timer, expires, deferrable),
 
 	TP_STRUCT__entry(
 		__field( void *,	timer		)
 		__field( void *,	function	)
 		__field( unsigned long,	expires		)
 		__field( unsigned long,	now		)
+		__field(char,	deferrable)
 	),
 
 	TP_fast_assign(
@@ -59,11 +61,12 @@
 		__entry->function	= timer->function;
 		__entry->expires	= expires;
 		__entry->now		= jiffies;
+		__entry->deferrable     = deferrable;
 	),
 
-	TP_printk("timer=%p function=%pf expires=%lu [timeout=%ld]",
+	TP_printk("timer=%p function=%pf expires=%lu [timeout=%ld] defer=%c",
 		  __entry->timer, __entry->function, __entry->expires,
-		  (long)__entry->expires - __entry->now)
+		  (long)__entry->expires - __entry->now, __entry->deferrable)
 );
 
 /**
diff --git a/kernel/timer.c b/kernel/timer.c
index a297ffc..24c5d20 100644
--- a/kernel/timer.c
+++ b/kernel/timer.c
@@ -584,7 +584,8 @@
 debug_activate(struct timer_list *timer, unsigned long expires)
 {
 	debug_timer_activate(timer);
-	trace_timer_start(timer, expires);
+	trace_timer_start(timer, expires,
+			 tbase_get_deferrable(timer->base) > 0 ? 'y' : 'n');
 }
 
 static inline void debug_deactivate(struct timer_list *timer)
diff --git a/lib/spinlock_debug.c b/lib/spinlock_debug.c
index d0ec4f3..1967297 100644
--- a/lib/spinlock_debug.c
+++ b/lib/spinlock_debug.c
@@ -58,7 +58,7 @@
 	printk(KERN_EMERG "BUG: spinlock %s on CPU#%d, %s/%d\n",
 		msg, raw_smp_processor_id(),
 		current->comm, task_pid_nr(current));
-	printk(KERN_EMERG " lock: %ps, .magic: %08x, .owner: %s/%d, "
+	printk(KERN_EMERG " lock: %pS, .magic: %08x, .owner: %s/%d, "
 			".owner_cpu: %d\n",
 		lock, lock->magic,
 		owner ? owner->comm : "<none>",
diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
index 733dc22..22a4dbe 100644
--- a/net/bluetooth/l2cap_core.c
+++ b/net/bluetooth/l2cap_core.c
@@ -701,6 +701,9 @@
 	if (!skb)
 		return;
 
+	if (conn->hcon == NULL || conn->hcon->hdev == NULL)
+		return;
+
 	if (lmp_no_flush_capable(conn->hcon->hdev))
 		flags = ACL_START_NO_FLUSH;
 	else
diff --git a/net/bluetooth/rfcomm/core.c b/net/bluetooth/rfcomm/core.c
index 1390eca..dc4bf2f 100644
--- a/net/bluetooth/rfcomm/core.c
+++ b/net/bluetooth/rfcomm/core.c
@@ -138,6 +138,21 @@
 
 static inline void rfcomm_session_put(struct rfcomm_session *s)
 {
+	bool match = false;
+	struct rfcomm_session *sess;
+	struct list_head *p, *n;
+	list_for_each_safe(p, n, &session_list) {
+		sess = list_entry(p, struct rfcomm_session, list);
+		if (s == sess) {
+			match = true;
+			break;
+		}
+	}
+	if (!match) {
+		BT_ERR("session already freed previously");
+		dump_stack();
+		return;
+	}
 	if (atomic_dec_and_test(&s->refcnt))
 		rfcomm_session_del(s);
 }
diff --git a/net/sched/sch_prio.c b/net/sched/sch_prio.c
index 8129d97..5e5ad91 100644
--- a/net/sched/sch_prio.c
+++ b/net/sched/sch_prio.c
@@ -18,6 +18,7 @@
 #include <linux/string.h>
 #include <linux/errno.h>
 #include <linux/skbuff.h>
+#include <linux/netdevice.h>
 #include <net/netlink.h>
 #include <net/pkt_sched.h>
 
@@ -176,6 +177,7 @@
 	struct prio_sched_data *q = qdisc_priv(sch);
 	struct tc_prio_qopt *qopt;
 	int i;
+	int flow_change = 0;
 
 	if (nla_len(opt) < sizeof(*qopt))
 		return -EINVAL;
@@ -190,7 +192,10 @@
 	}
 
 	sch_tree_lock(sch);
-	q->enable_flow = qopt->enable_flow;
+	if (q->enable_flow != qopt->enable_flow) {
+		q->enable_flow = qopt->enable_flow;
+		flow_change = 1;
+	}
 	q->bands = qopt->bands;
 	memcpy(q->prio2band, qopt->priomap, TC_PRIO_MAX+1);
 
@@ -225,6 +230,13 @@
 			}
 		}
 	}
+
+	/* Schedule qdisc when flow re-enabled */
+	if (flow_change && q->enable_flow) {
+		if (!test_bit(__QDISC_STATE_DEACTIVATED,
+			      &sch->state))
+			__netif_schedule(qdisc_root(sch));
+	}
 	return 0;
 }
 
diff --git a/sound/soc/codecs/wcd9304.c b/sound/soc/codecs/wcd9304.c
index c94c2e5..f4f55fa 100644
--- a/sound/soc/codecs/wcd9304.c
+++ b/sound/soc/codecs/wcd9304.c
@@ -4840,6 +4840,10 @@
 	{SITAR_A_RX_LINE_1_GAIN, 0x10, 0x10},
 	{SITAR_A_RX_LINE_2_GAIN, 0x10, 0x10},
 
+	/* Set the MICBIAS default output as pull down*/
+	{SITAR_A_MICB_1_CTL, 0x01, 0x01},
+	{SITAR_A_MICB_2_CTL, 0x01, 0x01},
+
 	/* Initialize mic biases to differential mode */
 	{SITAR_A_MICB_1_INT_RBIAS, 0x24, 0x24},
 	{SITAR_A_MICB_2_INT_RBIAS, 0x24, 0x24},
diff --git a/sound/soc/codecs/wcd9310.c b/sound/soc/codecs/wcd9310.c
index 68892c1..571e71c 100644
--- a/sound/soc/codecs/wcd9310.c
+++ b/sound/soc/codecs/wcd9310.c
@@ -3687,19 +3687,9 @@
 	return v_hs_max;
 }
 
-static void tabla_codec_calibrate_hs_polling(struct snd_soc_codec *codec)
+static void tabla_codec_calibrate_rel(struct snd_soc_codec *codec)
 {
-	u8 *n_ready, *n_cic;
-	struct tabla_mbhc_btn_detect_cfg *btn_det;
 	struct tabla_priv *tabla = snd_soc_codec_get_drvdata(codec);
-	const s16 v_ins_hu = tabla_get_current_v_ins(tabla, true);
-
-	btn_det = TABLA_MBHC_CAL_BTN_DET_PTR(tabla->mbhc_cfg.calibration);
-
-	snd_soc_write(codec, TABLA_A_CDC_MBHC_VOLT_B1_CTL,
-		      v_ins_hu & 0xFF);
-	snd_soc_write(codec, TABLA_A_CDC_MBHC_VOLT_B2_CTL,
-		      (v_ins_hu >> 8) & 0xFF);
 
 	snd_soc_write(codec, TABLA_A_CDC_MBHC_VOLT_B3_CTL,
 		      tabla->mbhc_data.v_b1_hu & 0xFF);
@@ -3720,6 +3710,23 @@
 		      tabla->mbhc_data.v_brl & 0xFF);
 	snd_soc_write(codec, TABLA_A_CDC_MBHC_VOLT_B12_CTL,
 		      (tabla->mbhc_data.v_brl >> 8) & 0xFF);
+}
+
+static void tabla_codec_calibrate_hs_polling(struct snd_soc_codec *codec)
+{
+	u8 *n_ready, *n_cic;
+	struct tabla_mbhc_btn_detect_cfg *btn_det;
+	struct tabla_priv *tabla = snd_soc_codec_get_drvdata(codec);
+	const s16 v_ins_hu = tabla_get_current_v_ins(tabla, true);
+
+	btn_det = TABLA_MBHC_CAL_BTN_DET_PTR(tabla->mbhc_cfg.calibration);
+
+	snd_soc_write(codec, TABLA_A_CDC_MBHC_VOLT_B1_CTL,
+		      v_ins_hu & 0xFF);
+	snd_soc_write(codec, TABLA_A_CDC_MBHC_VOLT_B2_CTL,
+		      (v_ins_hu >> 8) & 0xFF);
+
+	tabla_codec_calibrate_rel(codec);
 
 	n_ready = tabla_mbhc_cal_btn_det_mp(btn_det, TABLA_BTN_DET_N_READY);
 	snd_soc_write(codec, TABLA_A_CDC_MBHC_TIMER_B1_CTL,
@@ -5671,15 +5678,55 @@
 	return r;
 }
 
+static void tabla_mbhc_calc_rel_thres(struct snd_soc_codec *codec, s16 mv)
+{
+	s16 deltamv;
+	struct tabla_priv *tabla;
+	struct tabla_mbhc_btn_detect_cfg *btn_det;
+
+	tabla = snd_soc_codec_get_drvdata(codec);
+	btn_det = TABLA_MBHC_CAL_BTN_DET_PTR(tabla->mbhc_cfg.calibration);
+
+	tabla->mbhc_data.v_b1_h =
+	    tabla_codec_v_sta_dce(codec, DCE,
+				  mv + btn_det->v_btn_press_delta_cic);
+
+	tabla->mbhc_data.v_brh = tabla->mbhc_data.v_b1_h;
+
+	tabla->mbhc_data.v_brl = TABLA_MBHC_BUTTON_MIN;
+
+	deltamv = mv + btn_det->v_btn_press_delta_sta;
+	tabla->mbhc_data.v_b1_hu = tabla_codec_v_sta_dce(codec, STA, deltamv);
+
+	deltamv = mv + btn_det->v_btn_press_delta_cic;
+	tabla->mbhc_data.v_b1_huc = tabla_codec_v_sta_dce(codec, DCE, deltamv);
+}
+
+static void tabla_mbhc_set_rel_thres(struct snd_soc_codec *codec, s16 mv)
+{
+	tabla_mbhc_calc_rel_thres(codec, mv);
+	tabla_codec_calibrate_rel(codec);
+}
+
+static s16 tabla_mbhc_highest_btn_mv(struct snd_soc_codec *codec)
+{
+	struct tabla_priv *tabla;
+	struct tabla_mbhc_btn_detect_cfg *btn_det;
+	u16 *btn_high;
+
+	tabla = snd_soc_codec_get_drvdata(codec);
+	btn_det = TABLA_MBHC_CAL_BTN_DET_PTR(tabla->mbhc_cfg.calibration);
+	btn_high = tabla_mbhc_cal_btn_det_mp(btn_det, TABLA_BTN_DET_V_BTN_HIGH);
+
+	return btn_high[btn_det->num_btn - 1];
+}
+
 static void tabla_mbhc_calc_thres(struct snd_soc_codec *codec)
 {
 	struct tabla_priv *tabla;
-	s16 btn_mv = 0, btn_delta_mv;
 	struct tabla_mbhc_btn_detect_cfg *btn_det;
 	struct tabla_mbhc_plug_type_cfg *plug_type;
-	u16 *btn_high;
 	u8 *n_ready;
-	int i;
 
 	tabla = snd_soc_codec_get_drvdata(codec);
 	btn_det = TABLA_MBHC_CAL_BTN_DET_PTR(tabla->mbhc_cfg.calibration);
@@ -5730,22 +5777,7 @@
 					     false);
 	}
 
-	btn_high = tabla_mbhc_cal_btn_det_mp(btn_det, TABLA_BTN_DET_V_BTN_HIGH);
-	for (i = 0; i < btn_det->num_btn; i++)
-		btn_mv = btn_high[i] > btn_mv ? btn_high[i] : btn_mv;
-
-	tabla->mbhc_data.v_b1_h = tabla_codec_v_sta_dce(codec, DCE, btn_mv);
-	btn_delta_mv = btn_mv + btn_det->v_btn_press_delta_sta;
-	tabla->mbhc_data.v_b1_hu =
-	    tabla_codec_v_sta_dce(codec, STA, btn_delta_mv);
-
-	btn_delta_mv = btn_mv + btn_det->v_btn_press_delta_cic;
-
-	tabla->mbhc_data.v_b1_huc =
-	    tabla_codec_v_sta_dce(codec, DCE, btn_delta_mv);
-
-	tabla->mbhc_data.v_brh = tabla->mbhc_data.v_b1_h;
-	tabla->mbhc_data.v_brl = TABLA_MBHC_BUTTON_MIN;
+	tabla_mbhc_calc_rel_thres(codec, tabla_mbhc_highest_btn_mv(codec));
 
 	tabla->mbhc_data.v_no_mic =
 	    tabla_codec_v_sta_dce(codec, STA, plug_type->v_no_mic);
@@ -5904,8 +5936,9 @@
 {
 	int i, mask;
 	short dce, sta;
-	s32 mv, mv_s, stamv_s;
+	s32 mv, mv_s, stamv, stamv_s;
 	bool vddio;
+	u16 *btn_high;
 	int btn = -1, meas = 0;
 	struct tabla_priv *priv = data;
 	const struct tabla_mbhc_btn_detect_cfg *d =
@@ -5918,6 +5951,7 @@
 
 	pr_debug("%s: enter\n", __func__);
 
+	btn_high = tabla_mbhc_cal_btn_det_mp(d, TABLA_BTN_DET_V_BTN_HIGH);
 	TABLA_ACQUIRE_LOCK(priv->codec_resource_lock);
 	if (priv->mbhc_state == MBHC_STATE_POTENTIAL_RECOVERY) {
 		pr_debug("%s: mbhc is being recovered, skip button press\n",
@@ -5954,35 +5988,34 @@
 			pr_debug("%s: Button is already released shortly after "
 				 "resume\n", __func__);
 			n_btn_meas = 0;
-		} else {
-			pr_debug("%s: Button is already released without "
-				 "resume", __func__);
-			sta = tabla_codec_read_sta_result(codec);
-			stamv_s = tabla_codec_sta_dce_v(codec, 0, sta);
-			if (vddio)
-				stamv_s = tabla_scale_v_micb_vddio(priv,
-								   stamv_s,
-								   false);
-			btn = tabla_determine_button(priv, mv_s);
-			if (btn != tabla_determine_button(priv, stamv_s))
-				btn = -1;
-			goto done;
 		}
 	}
 
-	/* determine pressed button */
+	/* save hw dce */
 	btnmeas[meas++] = tabla_determine_button(priv, mv_s);
-	pr_debug("%s: meas %d - DCE %d,%d,%d button %d\n", __func__,
-		 meas - 1, dce, mv, mv_s, btnmeas[meas - 1]);
-	if (n_btn_meas == 0)
-		btn = btnmeas[0];
+	pr_debug("%s: meas HW - DCE %x,%d,%d button %d\n", __func__,
+		 dce, mv, mv_s, btnmeas[0]);
+	if (n_btn_meas == 0) {
+		sta = tabla_codec_read_sta_result(codec);
+		stamv_s = stamv = tabla_codec_sta_dce_v(codec, 0, sta);
+		if (vddio)
+			stamv_s = tabla_scale_v_micb_vddio(priv, stamv, false);
+		btn = tabla_determine_button(priv, stamv_s);
+		pr_debug("%s: meas HW - STA %x,%d,%d button %d\n", __func__,
+			 sta, stamv, stamv_s, btn);
+		BUG_ON(meas != 1);
+		if (btnmeas[0] != btn)
+			btn = -1;
+	}
+
+	/* determine pressed button */
 	for (; ((d->n_btn_meas) && (meas < (d->n_btn_meas + 1))); meas++) {
 		dce = tabla_codec_sta_dce(codec, 1, false);
 		mv = tabla_codec_sta_dce_v(codec, 1, dce);
 		mv_s = vddio ? tabla_scale_v_micb_vddio(priv, mv, false) : mv;
 
 		btnmeas[meas] = tabla_determine_button(priv, mv_s);
-		pr_debug("%s: meas %d - DCE %d,%d,%d button %d\n",
+		pr_debug("%s: meas %d - DCE %x,%d,%d button %d\n",
 			 __func__, meas, dce, mv, mv_s, btnmeas[meas]);
 		/* if large enough measurements are collected,
 		 * start to check if last all n_btn_con measurements were
@@ -6010,6 +6043,8 @@
 				 "press\n", __func__);
 			goto done;
 		}
+		/* narrow down release threshold */
+		tabla_mbhc_set_rel_thres(codec, btn_high[btn]);
 		mask = tabla_get_button_mask(btn);
 		priv->buttons_pressed |= mask;
 		wcd9xxx_lock_sleep(core);
@@ -6115,6 +6150,8 @@
 		priv->buttons_pressed &= ~TABLA_JACK_BUTTON_MASK;
 	}
 
+	/* revert narrowed release threshold */
+	tabla_mbhc_calc_rel_thres(codec, tabla_mbhc_highest_btn_mv(codec));
 	tabla_codec_calibrate_hs_polling(codec);
 
 	if (priv->mbhc_cfg.gpio)
@@ -7590,6 +7627,11 @@
 	{TABLA_A_RX_LINE_3_GAIN, 0x10, 0x10},
 	{TABLA_A_RX_LINE_4_GAIN, 0x10, 0x10},
 
+	/* Set the MICBIAS default output as pull down*/
+	{TABLA_A_MICB_1_CTL, 0x01, 0x01},
+	{TABLA_A_MICB_2_CTL, 0x01, 0x01},
+	{TABLA_A_MICB_3_CTL, 0x01, 0x01},
+
 	/* Initialize mic biases to differential mode */
 	{TABLA_A_MICB_1_INT_RBIAS, 0x24, 0x24},
 	{TABLA_A_MICB_2_INT_RBIAS, 0x24, 0x24},
@@ -7645,11 +7687,16 @@
 };
 
 static const struct tabla_reg_mask_val tabla_1_x_codec_reg_init_val[] = {
+	/* Set the MICBIAS default output as pull down*/
+	{TABLA_1_A_MICB_4_CTL, 0x01, 0x01},
 	/* Initialize mic biases to differential mode */
 	{TABLA_1_A_MICB_4_INT_RBIAS, 0x24, 0x24},
 };
 
 static const struct tabla_reg_mask_val tabla_2_higher_codec_reg_init_val[] = {
+
+	/* Set the MICBIAS default output as pull down*/
+	{TABLA_2_A_MICB_4_CTL, 0x01, 0x01},
 	/* Initialize mic biases to differential mode */
 	{TABLA_2_A_MICB_4_INT_RBIAS, 0x24, 0x24},
 };
diff --git a/sound/soc/msm/msm-pcm-afe.c b/sound/soc/msm/msm-pcm-afe.c
index 5f3cada..b7b4d51 100644
--- a/sound/soc/msm/msm-pcm-afe.c
+++ b/sound/soc/msm/msm-pcm-afe.c
@@ -140,9 +140,6 @@
 						runtime->channels * 2)));
 				pr_debug("prtd->poll_time: %d",
 						prtd->poll_time);
-				hrtimer_start(&prtd->hrt,
-					ns_to_ktime(0),
-					HRTIMER_MODE_REL);
 				break;
 			}
 			case AFE_EVENT_RTPORT_STOP:
@@ -206,9 +203,6 @@
 				snd_pcm_lib_period_bytes(prtd->substream)
 					* 1000 * 1000)/(runtime->rate
 					* runtime->channels * 2)));
-			hrtimer_start(&prtd->hrt,
-				ns_to_ktime(0),
-				HRTIMER_MODE_REL);
 			pr_debug("prtd->poll_time : %d", prtd->poll_time);
 			break;
 		}
@@ -465,6 +459,8 @@
 	case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
 		pr_debug("%s: SNDRV_PCM_TRIGGER_START\n", __func__);
 		prtd->start = 1;
+		hrtimer_start(&prtd->hrt, ns_to_ktime(0),
+					HRTIMER_MODE_REL);
 		break;
 	case SNDRV_PCM_TRIGGER_STOP:
 	case SNDRV_PCM_TRIGGER_SUSPEND:
diff --git a/sound/soc/msm/msm-pcm-voip.c b/sound/soc/msm/msm-pcm-voip.c
index b18117c..359414b 100644
--- a/sound/soc/msm/msm-pcm-voip.c
+++ b/sound/soc/msm/msm-pcm-voip.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2011, Code Aurora Forum. All rights reserved.
+/* Copyright (c) 2011-2012, Code Aurora Forum. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -531,6 +531,7 @@
 				list_first_entry(&prtd->free_in_queue,
 						struct voip_buf_node, list);
 			list_del(&buf_node->list);
+			spin_unlock_irqrestore(&prtd->dsp_lock, dsp_flags);
 			if (prtd->mode == MODE_PCM) {
 				ret = copy_from_user(&buf_node->frame.voc_pkt,
 							buf, count);
@@ -538,6 +539,7 @@
 			} else
 				ret = copy_from_user(&buf_node->frame,
 							buf, count);
+			spin_lock_irqsave(&prtd->dsp_lock, dsp_flags);
 			list_add_tail(&buf_node->list, &prtd->in_queue);
 			spin_unlock_irqrestore(&prtd->dsp_lock, dsp_flags);
 		} else {
@@ -582,6 +584,7 @@
 			buf_node = list_first_entry(&prtd->out_queue,
 					struct voip_buf_node, list);
 			list_del(&buf_node->list);
+			spin_unlock_irqrestore(&prtd->dsp_ul_lock, dsp_flags);
 			if (prtd->mode == MODE_PCM)
 				ret = copy_to_user(buf,
 						   &buf_node->frame.voc_pkt,
@@ -595,6 +598,7 @@
 					__func__, ret);
 				ret = -EFAULT;
 			}
+			spin_lock_irqsave(&prtd->dsp_ul_lock, dsp_flags);
 			list_add_tail(&buf_node->list,
 						&prtd->free_out_queue);
 			spin_unlock_irqrestore(&prtd->dsp_ul_lock, dsp_flags);
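The copy paths above now drop dsp_lock/dsp_ul_lock across copy_from_user()/copy_to_user(), which can fault and sleep, and retake the lock only to re-queue the node. A condensed sketch of that pattern; the struct names follow the driver and are assumed, error handling is trimmed:

#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/uaccess.h>

static int example_queue_from_user(struct voip_drv_info *prtd,
				   const char __user *buf, size_t count)
{
	struct voip_buf_node *node;
	unsigned long flags;
	int rc;

	spin_lock_irqsave(&prtd->dsp_lock, flags);
	node = list_first_entry(&prtd->free_in_queue,
				struct voip_buf_node, list);
	list_del(&node->list);
	/* never copy to/from user space with a spinlock held or IRQs off */
	spin_unlock_irqrestore(&prtd->dsp_lock, flags);

	rc = copy_from_user(&node->frame, buf, count) ? -EFAULT : 0;

	spin_lock_irqsave(&prtd->dsp_lock, flags);
	list_add_tail(&node->list, &prtd->in_queue);
	spin_unlock_irqrestore(&prtd->dsp_lock, flags);

	return rc;
}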
diff --git a/sound/soc/msm/qdsp6/q6afe.c b/sound/soc/msm/qdsp6/q6afe.c
index 4c0ac9e..a4f4b60 100644
--- a/sound/soc/msm/qdsp6/q6afe.c
+++ b/sound/soc/msm/qdsp6/q6afe.c
@@ -448,7 +448,7 @@
 
 	if ((port_id == RT_PROXY_DAI_001_RX) ||
 		(port_id == RT_PROXY_DAI_002_TX))
-		return -EINVAL;
+		return 0;
 	if ((port_id == RT_PROXY_DAI_002_RX) ||
 		(port_id == RT_PROXY_DAI_001_TX))
 		port_id = VIRTUAL_ID_TO_PORTID(port_id);
@@ -608,7 +608,7 @@
 
 	if ((port_id == RT_PROXY_DAI_001_RX) ||
 		(port_id == RT_PROXY_DAI_002_TX))
-		return -EINVAL;
+		return 0;
 	if ((port_id == RT_PROXY_DAI_002_RX) ||
 		(port_id == RT_PROXY_DAI_001_TX))
 		port_id = VIRTUAL_ID_TO_PORTID(port_id);
diff --git a/sound/soc/msm/qdsp6v2/msm-dai-q6-v2.c b/sound/soc/msm/qdsp6v2/msm-dai-q6-v2.c
index 99fd1d3..485569b 100644
--- a/sound/soc/msm/qdsp6v2/msm-dai-q6-v2.c
+++ b/sound/soc/msm/qdsp6v2/msm-dai-q6-v2.c
@@ -40,7 +40,11 @@
 	union afe_port_config port_config;
 };
 
-static struct clk *pcm_clk;
+static struct clk *pcm_src_clk;
+static struct clk *pcm_branch_clk;
+static struct clk *pcm_oe_src_clk;
+static struct clk *pcm_oe_branch_clk;
+
 static DEFINE_MUTEX(aux_pcm_mutex);
 static int aux_pcm_count;
 
@@ -120,6 +124,9 @@
 	if (IS_ERR_VALUE(rc))
 		dev_err(dai->dev, "fail to close AUX PCM TX port\n");
 
+	clk_disable_unprepare(pcm_branch_clk);
+	clk_disable_unprepare(pcm_oe_branch_clk);
+
 	mutex_unlock(&aux_pcm_mutex);
 }
 
@@ -127,8 +134,11 @@
 		struct snd_soc_dai *dai)
 {
 	struct msm_dai_q6_dai_data *dai_data = dev_get_drvdata(dai->dev);
+	struct msm_dai_auxpcm_pdata *auxpcm_pdata = NULL;
 	int rc = 0;
 
+	auxpcm_pdata = dai->dev->platform_data;
+
 	mutex_lock(&aux_pcm_mutex);
 
 	if (aux_pcm_count == 2) {
@@ -170,12 +180,37 @@
 	 * assert/deassert and afe_open sequence is not followed.
 	 */
 
+	rc = clk_set_rate(pcm_src_clk, auxpcm_pdata->pcm_clk_rate);
+	if (rc < 0) {
+		pr_err("%s: clk_set_rate failed\n", __func__);
+		goto fail;
+	}
+
+	rc = clk_prepare_enable(pcm_branch_clk);
+	if (rc) {
+		pr_err("%s: clk enable failed\n", __func__);
+		goto fail;
+	}
+
+	rc = clk_set_rate(pcm_oe_src_clk, 24576000>>1);
+	if (rc < 0) {
+		pr_err("%s: clk_set_rate on pcm oe failed\n", __func__);
+		goto fail;
+	}
+
+	rc = clk_prepare_enable(pcm_oe_branch_clk);
+	if (rc) {
+		pr_err("%s: clk enable pcm_oe_branch_clk failed\n", __func__);
+		goto fail;
+	}
+
 	afe_open(PCM_RX, &dai_data->port_config, dai_data->rate);
 
 	afe_open(PCM_TX, &dai_data->port_config, dai_data->rate);
 
 	mutex_unlock(&aux_pcm_mutex);
 
+fail:
 	return rc;
 }
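
The startup path above now programs the PCM clocks before the AFE ports are opened: set the source-clock rate, then prepare and enable the matching branch clock, once for the bit-clock pair and once for the OE pair (whose source is set to 24576000 >> 1 Hz in the hunk). A minimal sketch of that set-rate/enable step is below; the helper name and the clock handles passed in are assumptions.

#include <linux/clk.h>
#include <linux/kernel.h>

static int demo_pcm_clk_on(struct clk *src_clk, struct clk *branch_clk,
			   unsigned long rate)
{
	int rc;

	rc = clk_set_rate(src_clk, rate);
	if (rc < 0) {
		pr_err("%s: clk_set_rate(%lu) failed: %d\n",
		       __func__, rate, rc);
		return rc;
	}

	rc = clk_prepare_enable(branch_clk);
	if (rc) {
		pr_err("%s: clk_prepare_enable failed: %d\n", __func__, rc);
		return rc;
	}

	return 0;
}

The shutdown hunk earlier in this file mirrors this with clk_disable_unprepare() on both branch clocks.
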
 
@@ -217,6 +252,7 @@
 	auxpcm_pdata = (struct msm_dai_auxpcm_pdata *)
 					dev_get_drvdata(dai->dev);
 	dai->dev->platform_data = auxpcm_pdata;
+	dai->id = dai->dev->id;
 
 	mutex_lock(&aux_pcm_mutex);
 
@@ -225,9 +261,41 @@
 	 * data to the cpu driver, since the cpu driver is unaware of any
 	 * board specific configuration.
 	 */
-	if (!pcm_clk)
-		pcm_clk = clk_get(dai->dev, auxpcm_pdata->clk);
+	if ((!pcm_src_clk) || (!pcm_branch_clk)) {
+		pcm_src_clk = clk_get(dai->dev, auxpcm_pdata->clk);
 
+		if (IS_ERR(pcm_src_clk)) {
+			pr_err("%s: could not get pcm_src_clk\n", __func__);
+			pcm_src_clk = NULL;
+			return -ENODEV;
+		}
+
+		pcm_branch_clk = clk_get(dai->dev, "ibit_clk");
+
+		if (IS_ERR(pcm_branch_clk)) {
+			pr_err("%s: could not get pcm_branch_clk\n", __func__);
+			pcm_branch_clk = NULL;
+			return -ENODEV;
+		}
+	}
+
+	if ((!pcm_oe_src_clk) || (!pcm_oe_branch_clk)) {
+
+		pcm_oe_src_clk = clk_get(dai->dev, "core_oe_src_clk");
+
+		if (IS_ERR(pcm_oe_src_clk)) {
+			pr_err("%s: could not get pcm_oe_src_clk\n", __func__);
+			pcm_oe_src_clk = NULL;
+			return -ENODEV;
+		}
+
+		pcm_oe_branch_clk = clk_get(dai->dev, "core_oe_clk");
+		if (IS_ERR(pcm_oe_branch_clk)) {
+			pr_err("%s: could not get pcm_oe_clk\n", __func__);
+			pcm_oe_branch_clk = NULL;
+			return -ENODEV;
+		}
+	}
 	mutex_unlock(&aux_pcm_mutex);
 
 	dai_data = kzalloc(sizeof(struct msm_dai_q6_dai_data), GFP_KERNEL);
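
The probe hunk above caches the four clocks in globals and returns -ENODEV if any lookup fails. A minimal sketch of one such lookup, with a hypothetical helper name (the consumer IDs such as "ibit_clk" and "core_oe_clk" come from the hunk itself):

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>

static struct clk *demo_get_clk(struct device *dev, const char *id)
{
	struct clk *clk = clk_get(dev, id);

	if (IS_ERR(clk)) {
		dev_err(dev, "could not get \"%s\" clock: %ld\n",
			id, PTR_ERR(clk));
		return NULL;	/* caller converts this to -ENODEV */
	}
	return clk;
}
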
@@ -813,7 +881,7 @@
 	.remove = msm_dai_q6_dai_remove,
 };
 
-static int msm_auxpcm_dev_probe(struct platform_device *pdev)
+static int __devinit msm_auxpcm_dev_probe(struct platform_device *pdev)
 {
 	int id;
 	void *plat_data;
@@ -837,6 +905,7 @@
 	dev_dbg(&pdev->dev, "dev name %s\n", dev_name(&pdev->dev));
 
 	dev_set_drvdata(&pdev->dev, plat_data);
+	pdev->dev.id = id;
 
 	switch (id) {
 	case AFE_PORT_ID_PRIMARY_PCM_RX:
@@ -855,7 +924,7 @@
 	return rc;
 }
 
-static int msm_auxpcm_resource_probe(
+static int __devinit msm_auxpcm_resource_probe(
 			struct platform_device *pdev)
 {
 	int rc = 0;
@@ -950,13 +1019,13 @@
 	return rc;
 }
 
-static int msm_auxpcm_dev_remove(struct platform_device *pdev)
+static int __devexit msm_auxpcm_dev_remove(struct platform_device *pdev)
 {
 	snd_soc_unregister_dai(&pdev->dev);
 	return 0;
 }
 
-static int msm_auxpcm_resource_remove(
+static int __devexit msm_auxpcm_resource_remove(
 				struct platform_device *pdev)
 {
 	void *auxpcm_pdata;
@@ -967,22 +1036,20 @@
 	return 0;
 }
 
-static const struct of_device_id msm_auxpcm_resource_dt_match[] = {
+static struct of_device_id msm_auxpcm_resource_dt_match[] = {
 	{ .compatible = "qcom,msm-auxpcm-resource", },
 	{}
 };
-MODULE_DEVICE_TABLE(of, msm_auxpcm_resource_dt_match);
 
-static const struct of_device_id msm_auxpcm_dev_dt_match[] = {
+static struct of_device_id msm_auxpcm_dev_dt_match[] = {
 	{ .compatible = "qcom,msm-auxpcm-dev", },
 	{}
 };
-MODULE_DEVICE_TABLE(of, msm_auxpcm_dev_dt_match);
 
 
-static struct platform_driver msm_auxpcm_dev = {
+static struct platform_driver msm_auxpcm_dev_driver = {
 	.probe  = msm_auxpcm_dev_probe,
-	.remove = msm_auxpcm_dev_remove,
+	.remove = __devexit_p(msm_auxpcm_dev_remove),
 	.driver = {
 		.name = "msm-auxpcm-dev",
 		.owner = THIS_MODULE,
@@ -990,9 +1057,9 @@
 	},
 };
 
-static struct platform_driver msm_auxpcm_resource = {
+static struct platform_driver msm_auxpcm_resource_driver = {
 	.probe  = msm_auxpcm_resource_probe,
-	.remove  = msm_auxpcm_resource_remove,
+	.remove  = __devexit_p(msm_auxpcm_resource_remove),
 	.driver = {
 		.name = "msm-auxpcm-resource",
 		.owner = THIS_MODULE,
@@ -1134,22 +1201,23 @@
 {
 	int rc;
 
-	rc = platform_driver_register(&msm_auxpcm_dev);
+	rc = platform_driver_register(&msm_auxpcm_dev_driver);
 	if (rc)
 		goto fail;
 
-	rc = platform_driver_register(&msm_auxpcm_resource);
+	rc = platform_driver_register(&msm_auxpcm_resource_driver);
+
 	if (rc) {
 		pr_err("%s: fail to register cpu dai driver\n", __func__);
-		platform_driver_unregister(&msm_auxpcm_dev);
+		platform_driver_unregister(&msm_auxpcm_dev_driver);
 		goto fail;
 	}
 
 	rc = platform_driver_register(&msm_dai_q6);
 	if (rc) {
 		pr_err("%s: fail to register dai q6 driver", __func__);
-		platform_driver_unregister(&msm_auxpcm_dev);
-		platform_driver_unregister(&msm_auxpcm_resource);
+		platform_driver_unregister(&msm_auxpcm_dev_driver);
+		platform_driver_unregister(&msm_auxpcm_resource_driver);
 		goto fail;
 	}
 
@@ -1157,8 +1225,8 @@
 	if (rc) {
 		pr_err("%s: fail to register dai q6 dev driver", __func__);
 		platform_driver_unregister(&msm_dai_q6);
-		platform_driver_unregister(&msm_auxpcm_dev);
-		platform_driver_unregister(&msm_auxpcm_resource);
+		platform_driver_unregister(&msm_auxpcm_dev_driver);
+		platform_driver_unregister(&msm_auxpcm_resource_driver);
 		goto fail;
 	}
 fail:
@@ -1170,8 +1238,8 @@
 {
 	platform_driver_unregister(&msm_dai_q6_dev);
 	platform_driver_unregister(&msm_dai_q6);
-	platform_driver_unregister(&msm_auxpcm_dev);
-	platform_driver_unregister(&msm_auxpcm_resource);
+	platform_driver_unregister(&msm_auxpcm_dev_driver);
+	platform_driver_unregister(&msm_auxpcm_resource_driver);
 }
 module_exit(msm_dai_q6_exit);
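
The probe/remove annotations restored above let a 3.x kernel discard this code when device hotplug is not configured: __devinit marks probe-time-only functions, and a __devexit remove handler is referenced through __devexit_p() so the pointer compiles away cleanly. A minimal, self-contained sketch under that assumption (driver and device names are made up):

#include <linux/init.h>
#include <linux/module.h>
#include <linux/platform_device.h>

static int __devinit demo_probe(struct platform_device *pdev)
{
	dev_info(&pdev->dev, "probed\n");
	return 0;
}

static int __devexit demo_remove(struct platform_device *pdev)
{
	return 0;
}

static struct platform_driver demo_driver = {
	.probe  = demo_probe,
	.remove = __devexit_p(demo_remove),
	.driver = {
		.name  = "demo-auxpcm",
		.owner = THIS_MODULE,
	},
};

module_platform_driver(demo_driver);
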
 
diff --git a/tools/perf/builtin-script.c b/tools/perf/builtin-script.c
index d4ce733..704d63a 100644
--- a/tools/perf/builtin-script.c
+++ b/tools/perf/builtin-script.c
@@ -148,7 +148,7 @@
 		return 0;
 
 	if (output[type].user_set) {
-		evname = __event_name(attr->type, attr->config);
+		evname = __event_name(attr->type, attr->config, NULL);
 		pr_err("Samples for '%s' event do not have %s attribute set. "
 		       "Cannot print '%s' field.\n",
 		       evname, sample_msg, output_field2str(field));
@@ -157,7 +157,7 @@
 
 	/* user did not ask for it explicitly so remove from the default list */
 	output[type].fields &= ~field;
-	evname = __event_name(attr->type, attr->config);
+	evname = __event_name(attr->type, attr->config, NULL);
 	pr_debug("Samples for '%s' event do not have %s attribute set. "
 		 "Skipping '%s' field.\n",
 		 evname, sample_msg, output_field2str(field));
@@ -305,7 +305,7 @@
 			if (event)
 				evname = event->name;
 		} else
-			evname = __event_name(attr->type, attr->config);
+			evname = __event_name(attr->type, attr->config, NULL);
 
 		printf("%s: ", evname ? evname : "[unknown]");
 	}
diff --git a/tools/perf/util/parse-events.c b/tools/perf/util/parse-events.c
index 5b3a0ef..be2e0c5 100644
--- a/tools/perf/util/parse-events.c
+++ b/tools/perf/util/parse-events.c
@@ -301,10 +301,10 @@
 	if (evsel->name)
 		return evsel->name;
 
-	return __event_name(type, config);
+	return __event_name(type, config, NULL);
 }
 
-const char *__event_name(int type, u64 config)
+const char *__event_name(int type, u64 config, char *pmu_name)
 {
 	static char buf[32];
 
@@ -349,7 +349,12 @@
 		return tracepoint_id_to_name(config);
 
 	default:
-		break;
+		if (pmu_name) {
+			snprintf(buf, sizeof(buf), "%s 0x%" PRIx64, pmu_name,
+					config);
+			return buf;
+		} else
+			break;
 	}
 
 	return "unknown";
@@ -630,6 +635,32 @@
 	return 0;
 }
 
+int parse_events_add_numeric_legacy(struct list_head *list, int *idx,
+			     const char *name, unsigned long config,
+			     struct list_head *head_config)
+{
+	struct perf_event_attr attr;
+	struct perf_pmu *pmu;
+	char *pmu_name = strdup(name);
+
+	memset(&attr, 0, sizeof(attr));
+
+	pmu = perf_pmu__find(pmu_name);
+
+	if (!pmu)
+		return -EINVAL;
+
+	attr.type = pmu->type;
+	attr.config = config;
+
+	if (head_config &&
+	    config_attr(&attr, head_config, 1))
+		return -EINVAL;
+
+	return add_event(list, idx, &attr,
+			 (char *) __event_name(pmu->type, config, pmu_name));
+}
+
 int parse_events_add_numeric(struct list_head *list, int *idx,
 			     unsigned long type, unsigned long config,
 			     struct list_head *head_config)
@@ -645,7 +676,7 @@
 		return -EINVAL;
 
 	return add_event(list, idx, &attr,
-			 (char *) __event_name(type, config));
+			 (char *) __event_name(type, config, NULL));
 }
 
 int parse_events_add_pmu(struct list_head *list, int *idx,
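
With the extra pmu_name argument, __event_name() can label otherwise-unknown event types with the owning PMU and raw config instead of "unknown", and the new parse_events_add_numeric_legacy() resolves the PMU by name (e.g. "msm-l2" or "msm-busmon" from the grammar below) and reuses that string. A small user-space sketch of just the formatting fallback, with a hypothetical function name and the same 32-byte static-buffer behaviour as the hunk above:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

typedef uint64_t u64;

static const char *pmu_event_name(const char *pmu_name, u64 config)
{
	static char buf[32];

	if (!pmu_name)
		return "unknown";

	/* "<pmu> 0x<config>", e.g. "msm-l2 0x4a" */
	snprintf(buf, sizeof(buf), "%s 0x%" PRIx64, pmu_name, config);
	return buf;
}

int main(void)
{
	printf("%s\n", pmu_event_name("msm-l2", 0x4a));
	return 0;
}

Because the buffer is static, the returned string is only valid until the next call, so callers should copy it if they need to keep it.
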
diff --git a/tools/perf/util/parse-events.h b/tools/perf/util/parse-events.h
index ca069f8..4da2f3c 100644
--- a/tools/perf/util/parse-events.h
+++ b/tools/perf/util/parse-events.h
@@ -23,7 +23,7 @@
 
 const char *event_type(int type);
 const char *event_name(struct perf_evsel *event);
-extern const char *__event_name(int type, u64 config);
+extern const char *__event_name(int type, u64 config, char *name);
 
 extern int parse_events_option(const struct option *opt, const char *str,
 			       int unset);
@@ -67,6 +67,10 @@
 int parse_events_add_raw(struct perf_evlist *evlist, unsigned long config,
 			 unsigned long config1, unsigned long config2,
 			 char *mod);
+int parse_events_add_numeric_legacy(struct list_head *list, int *idx,
+			     const char *name, unsigned long config,
+			     struct list_head *head_config);
+
 int parse_events_add_numeric(struct list_head *list, int *idx,
 			     unsigned long type, unsigned long config,
 			     struct list_head *head_config);
diff --git a/tools/perf/util/parse-events.y b/tools/perf/util/parse-events.y
index 07b292d..581cd94 100644
--- a/tools/perf/util/parse-events.y
+++ b/tools/perf/util/parse-events.y
@@ -156,13 +156,13 @@
 event_legacy_shared_raw:
 PE_SH_RAW
 {
-	ABORT_ON(parse_events_add_numeric(list_event, idx, 6, $1, NULL));
+	ABORT_ON(parse_events_add_numeric_legacy(list_event, idx, "msm-l2", $1, NULL));
 }
 
 event_legacy_fabric_raw:
 PE_FAB_RAW
 {
-	ABORT_ON(parse_events_add_numeric(list_event, idx, 7, $1, NULL));
+	ABORT_ON(parse_events_add_numeric_legacy(list_event, idx, "msm-busmon", $1, NULL));
 }
 
 event_config: