Merge "Perf: Re-enable counters after power collapse" into msm-3.4
diff --git a/Documentation/devicetree/bindings/arm/msm/msm_ion.txt b/Documentation/devicetree/bindings/arm/msm/msm_ion.txt
new file mode 100644
index 0000000..5c6b804
--- /dev/null
+++ b/Documentation/devicetree/bindings/arm/msm/msm_ion.txt
@@ -0,0 +1,50 @@
+ION Memory Manager (ION)
+
+ION is a memory manager that allows for sharing of buffers between different
+processes and between user space and kernel space. ION manages different
+memory spaces by separating them into "heaps". Depending on the type of heap,
+ION must reserve memory using the MSM-specific memory reservation bindings
+(see Documentation/devicetree/bindings/arm/msm/memory-reserve.txt).
+
+Required properties
+
+- compatible: "qcom,msm-ion"
+- reg: The ID of the ION heap.
+
+Optional properties
+
+- compatible: "qcom,msm-ion-reserve" This is required if memory is to be reserved
+  as specified by qcom,memory-reservation-size below.
+- qcom,heap-align: Alignment of start of the memory in the heap.
+- qcom,heap-adjacent: ID of heap this heap needs to be adjacent to.
+- qcom,memory-reservation-size: size of reserved memory for the ION heap.
+- qcom,memory-reservation-type: type of memory to be reserved
+(see memory-reserve.txt for information about memory reservations)
+
+Example:
+	qcom,ion {
+                 compatible = "qcom,msm-ion";
+                 #address-cells = <1>;
+                 #size-cells = <0>;
+
+                 qcom,ion-heap@30 { /* SYSTEM HEAP */
+                         reg = <30>;
+                 };
+
+                 qcom,ion-heap@8 { /* CP_MM HEAP */
+                         compatible = "qcom,msm-ion-reserve";
+                         reg = <8>;
+                         qcom,heap-align = <0x1000>;
+                         qcom,memory-reservation-type = "EBI1"; /* reserve EBI memory */
+                         qcom,memory-reservation-size = <0x7800000>;
+                 };
+
+                 qcom,ion-heap@29 { /* FIRMWARE HEAP */
+                         compatible = "qcom,msm-ion-reserve";
+                         reg = <29>;
+                         qcom,heap-align = <0x20000>;
+                         qcom,heap-adjacent = <8>;
+                         qcom,memory-reservation-type = "EBI1"; /* reserve EBI memory */
+                         qcom,memory-reservation-size = <0xA00000>;
+                 };
+	};
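The reservation properties above are plain 32-bit cells, so a heap driver can read
them with the generic OF helpers. The fragment below is only a hedged sketch of that
pattern; the struct and function names are illustrative assumptions, not the msm_ion
driver's actual code.

	#include <linux/of.h>

	/* Hypothetical container for one heap's reservation request. */
	struct heap_reservation {
		u32 align;
		u32 size;
	};

	static int read_heap_reservation(struct device_node *np,
					 struct heap_reservation *res)
	{
		/* Alignment is optional; fall back to no alignment constraint. */
		if (of_property_read_u32(np, "qcom,heap-align", &res->align))
			res->align = 0;

		/* The size is required for "qcom,msm-ion-reserve" heaps. */
		return of_property_read_u32(np, "qcom,memory-reservation-size",
					    &res->size);
	}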
diff --git a/Documentation/devicetree/bindings/fb/mdss-dsi-panel.txt b/Documentation/devicetree/bindings/fb/mdss-dsi-panel.txt
new file mode 100644
index 0000000..6db1150
--- /dev/null
+++ b/Documentation/devicetree/bindings/fb/mdss-dsi-panel.txt
@@ -0,0 +1,140 @@
+Qualcomm mdss-dsi-panel
+
+mdss-dsi-panel is a DSI panel driver which supports panels that
+are compatible with the MIPI Display Serial Interface specification.
+
+Required properties:
+- compatible:				Must be "qcom,mdss-dsi-panel"
+- status:				A string that must be set to "ok" or "okay" to enable
+					the panel driver. By default this property is set to
+					"disable"; it is set to "ok"/"okay" for specific
+					platforms.
+- qcom,mdss-pan-res:			An array of two values that specifies the panel
+					resolution (width and height).
+- qcom,mdss-pan-bpp:			Specifies the panel bits per pixel. Default value is 24 (rgb888).
+					18 = for rgb666
+					16 = for rgb565
+- qcom,mdss-panel-on-cmds:		An array of variable length that lists the init commands
+					of the panel. Each command has the format specified
+					below:
+					--> data type of the command
+					--> whether this command packet is the last one
+					--> virtual channel
+					--> whether an acknowledgement from the panel is needed
+					--> wait time after the command is transmitted
+					--> size of payload
+					--> payload
+- qcom,mdss-panel-off-cmds:		An array of variable length that lists the panel off
+					commands. Each command has the format specified
+					below:
+					--> data type of the command
+					--> whether this command packet is the last one
+					--> virtual channel
+					--> whether an acknowledgement from the panel is needed
+					--> wait time after the command is transmitted
+					--> size of payload
+					--> payload
+
+Required structure:
+- A qcom,mdss-dsi-panel node must be a child of an mdss-dsi controller node that links to
+    one of the two DSI controllers.
+
+
+Optional properties:
+- label:		        	A string used as a descriptive name of the panel
+- qcom,mdss-pan-porch-values:		An array of size 6 that specifies the panel blanking values.
+- qcom,mdss-pan-underflow-clr:		Specifies the controller settings for the panel underflow clear
+					settings. Default value is 0xff.
+- qcom,mdss-pan-bl-levels:		Specifies the backlight levels supported by the panel.
+					Default range is 1 to 255.
+
+- qcom,mdss-pan-dsi-mode:		Specifies the panel operating mode.
+					0 = enable video mode(default mode).
+					1 = enable command mode.
+- qcom,mdss-pan-dsi-h-pulse-mode:	Specifies the pulse mode option for the panel.
+					0 = Don't send hsa/he following vs/ve packet (default)
+					1 = Send hsa/he following vs/ve packet
+- qcom,mdss-pan-dsi-h-power-stop:	An array of size 3 that specifies the power mode
+					during horizontal porch and sync periods of the panel.
+					0 = high speed mode (default mode).
+					1 = low power mode for horizontal porches and sync pulse.
+- qcom,mdss-pan-dsi-bllp-power-stop:	An array of size 2 that specifies the power mode
+					during the blanking period and after EOF (end of frame).
+					0 = high speed mode (default mode).
+					1 = low power mode during blanking and EOF.
+- qcom,mdss-pan-dsi-traffic-mode:	Specifies the panel traffic mode.
+					0 = non burst with sync pulses (default mode).
+					1 = non burst with sync start event.
+					2 = burst mode.
+- qcom,mdss-pan-dsi-dst-format:		Specifies the destination format.
+					0 = DSI_VIDEO_DST_FORMAT_RGB565.
+					1 = DSI_VIDEO_DST_FORMAT_RGB666.
+					2 = DSI_VIDEO_DST_FORMAT_RGB666_LOOSE.
+					3 = DSI_VIDEO_DST_FORMAT_RGB888 (Default format)
+					6 = DSI_CMD_DST_FORMAT_RGB565
+					7 = DSI_CMD_DST_FORMAT_RGB666
+					8 = DSI_CMD_DST_FORMAT_RGB888
+- qcom,mdss-pan-dsi-vc:			Specifies the virtual channel identifier.
+					0 = default value.
+- qcom,mdss-pan-dsi-rgb-swap:		Specifies the R, G and B channel ordering.
+					0 = DSI_RGB_SWAP_RGB (default value)
+					1 = DSI_RGB_SWAP_RBG
+					2 = DSI_RGB_SWAP_BGR
+					3 = DSI_RGB_SWAP_BRG
+					4 = DSI_RGB_SWAP_GRB
+					5 = DSI_RGB_SWAP_GBR
+- qcom,mdss-pan-dsi-data-lanes:		An array that specifies which data lanes are enabled.
+					<1 1 0 0> = data lanes 1 and 2 are enabled (default).
+- qcom,mdss-pan-dsi-t-clk:		An array that specifies the byte clock cycles
+					before and after each mode switch.
+- qcom,mdss-pan-dsi-stream:		Specifies the packet stream to be used.
+					0 = stream 0 (default)
+					1 = stream 1
+- qcom,mdss-pan-dsi-mdp-tr:		Specifies the trigger mechanism to be used for MDP path.
+					0 = no trigger
+					2 = Tear check signal line used for trigger
+					4 = Triggered by software (default mode)
+					6 = Software trigger and TE
+- qcom,mdss-pan-dsi-dma-tr:		Specifies the trigger mechanism to be used for DMA path.
+					0 = no trigger
+					2 = Tear check signal line used for trigger
+					4 = Triggered by software (default mode)
+					5 = Software trigger and start/end of frame trigger.
+					6 = Software trigger and TE
+- qcom,mdss-pan-dsi-frame-rate:		Specifies the frame rate for the panel.
+					60 = 60 frames per second (default)
+
+Note: if a given optional qcom,* binding is not present, the driver will use
+the default values specified above.
+
+Example:
+	qcom,mdss_dsi@fd922800 {
+
+		qcom,mdss_dsi_sim_video {
+			compatible = "qcom,mdss-dsi-panel";
+			label = "simulator video mode dsi panel";
+			status = "disable";
+			qcom,mdss-pan-res = <640 480>;
+			qcom,mdss-pan-bpp = <24>;
+			qcom,mdss-pan-porch-values = <6 2 6 6 2 6>;
+			qcom,mdss-pan-underflow-clr = <0xff>;
+			qcom,mdss-pan-bl-levels = <1 15>;
+			qcom,mdss-pan-dsi-mode = <0>;
+			qcom,mdss-pan-dsi-h-pulse-mode = <1>;
+			qcom,mdss-pan-dsi-h-power-stop = <1 1 1>;
+			qcom,mdss-pan-dsi-bllp-power-stop = <1 1>;
+			qcom,mdss-pan-dsi-traffic-mode = <0>;
+			qcom,mdss-pan-dsi-dst-format = <3>;
+			qcom,mdss-pan-dsi-vc = <0>;
+			qcom,mdss-pan-dsi-rgb-swap = <0>;
+			qcom,mdss-pan-dsi-data-lanes = <1 1 0 0>;
+			qcom,mdss-pan-dsi-t-clk = <0x24 0x03>;
+			qcom,mdss-pan-dsi-stream = <0>;
+			qcom,mdss-pan-dsi-mdp-tr = <0x04>;
+			qcom,mdss-pan-dsi-dma-tr = <0x04>;
+			qcom,mdss-pan-dsi-frame-rate = <60>;
+			qcom,mdss-panel-on-cmds = [32 01 00 00 00 02 00 00];
+			qcom,mdss-panel-off-cmds = [22 01 00 00 00 00 00];
+		};
+
+	};
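The per-command layout documented for qcom,mdss-panel-on-cmds/qcom,mdss-panel-off-cmds
above (data type, last flag, virtual channel, ack, post-command wait, payload size, then
the payload bytes) can be pictured as a small packed header followed by a variable-length
payload. The struct below is an illustrative assumption of that layout only, not the
driver's actual definition.

	#include <linux/types.h>

	/* Illustrative layout of one panel command entry, assuming a packed
	 * header in the documented field order, followed by dlen payload bytes.
	 */
	struct panel_cmd_hdr {
		u8 dtype;	/* DSI data type of the command */
		u8 last;	/* non-zero if this is the last packet in the set */
		u8 vc;		/* virtual channel */
		u8 ack;		/* non-zero if an ack from the panel is expected */
		u8 wait;	/* wait time after the command is transmitted */
		u16 dlen;	/* payload size in bytes */
	} __packed;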
diff --git a/Documentation/devicetree/bindings/ocmem/msm-ocmem.txt b/Documentation/devicetree/bindings/ocmem/msm-ocmem.txt
index 1549f10..e212aca 100644
--- a/Documentation/devicetree/bindings/ocmem/msm-ocmem.txt
+++ b/Documentation/devicetree/bindings/ocmem/msm-ocmem.txt
@@ -18,6 +18,7 @@
 		    "ocmem_irq" corresponds to OCMEM Error Interrupt.
 		    "dm_irq" corresponds to DM Interrupt.
 - qcom,ocmem-num-regions: The number of OCMEM hardware memory regions.
+- qcom,resource-type: The hardware resource type of the OCMEM core.
 
 In addition to the information on the OCMEM core, the
 device tree contains additional information describing partitions
@@ -51,6 +52,7 @@
 		interrupts = <0 76 0 0 77 0>;
 		interrupt-names = "ocmem_irq", "dm_irq";
 		qcom,ocmem-num-regions = <0x3>;
+		qcom,resource-type = <0x706d636f>;
 		#address-cells = <1>;
 		#size-cells = <1>;
 		ranges = <0x0 0xfec00000 0x180000>;
diff --git a/Documentation/devicetree/bindings/pwm/qpnp-pwm.txt b/Documentation/devicetree/bindings/pwm/qpnp-pwm.txt
new file mode 100644
index 0000000..83ce3f8
--- /dev/null
+++ b/Documentation/devicetree/bindings/pwm/qpnp-pwm.txt
@@ -0,0 +1,160 @@
+Qualcomm QPNP PWM/LPG controller
+
+The qpnp-pwm driver supports Pulse Width Modulation (PWM) functionality. The
+PWM feature is used in a range of applications such as varying display
+brightness, LED dimming, etc. The Qualcomm PMICs have a physical device called
+the Light Pulse Generator (LPG). In addition to supporting PWM functionality,
+the LPG module provides a rich set of user-defined PWM pattern configurations,
+such as sawtooth, linear up, linear down, triangular patterns, etc. The PWM
+patterns are used in applications such as the charger driver, which uses these
+patterns to indicate various states of charging.
+
+Required device bindings:
+- compatible:		should be "qcom,qpnp-pwm"
+- reg:			Offset and length of the controller's LPG channel register
+			and of the LPG look-up table (LUT). The LUT is a contiguous
+			address space that is populated with PWM values. Each PWM
+			value is 9 bits wide and each table entry is 8 bits wide, so
+			two entries are used to store each PWM value: the lower entry
+			holds the PWM LSB byte and the higher entry holds the PWM MSB
+			bit (a packing sketch follows the notes at the end of this file).
+- reg-names:		Names for the above registers.
+			"qpnp-lpg-channel-base" = physical base address of the
+			controller's LPG channel register.
+			"qpnp-lpg-lut-base" = physical base address of LPG LUT.
+- qcom,channel-id:	channel Id for the PWM.
+
+Optional device bindings:
+- qcom,channel-owner:	A string value to supply owner information.
+- qcom,mode-select:	0 = PWM mode
+			1 = LPG mode
+If this binding is specified along with the required PWM/LPG bindings, then in
+addition to configuring PWM/LPG the qpnp-pwm driver also enables the feature at
+probe time. If the binding is not specified, the qpnp-pwm driver does not
+enable the feature. Also, it is considered an error to specify a particular
+mode using this binding without providing the corresponding feature subnode.
+
+All PWM devices support both PWM and LPG features within the same device.
+To support each feature, there are some required and optional bindings passed
+through device tree.
+
+The PWM device can enable one feature (either PWM or LPG) at any given time.
+Therefore, the qpnp-pwm driver applies the last PWM or LPG feature configuration
+and enables that feature.
+
+Required bindings to support PWM feature:
+- qcom,period:	PWM period time in microseconds.
+- qcom,duty:	PWM duty time in microseconds.
+- label:	"pwm"
+
+Required bindings to support LPG feature:
+The following bindings are needed to configure LPG mode, where a list of
+duty cycle percentages is populated. The size of the list cannot exceed
+the size of the LPG look-up table.
+
+- qcom,period:			PWM period time in microseconds.
+- qcom,duty-percents:		List of duty-cycle entries for the look-up table.
+- cell-index:			Index in the look-up table at which to start filling
+				the duty-pct list. This start index plus the size of
+				the list cannot exceed the size of the look-up table.
+- label:			"lpg"
+
+
+Optional bindings to support LPG feature:
+- qcom,ramp-step-duration:	Time (in ms) to wait before loading the next entry of the LUT
+- qcom,lpg-lut-pause-hi:	Time (in ms) to wait once the pattern reaches the hi
+				index.
+- qcom,lpg-lut-pause-lo:	Time (in ms) to wait once the pattern reaches the lo
+				index.
+- qcom,lpg-lut-ramp-direction:	1 = Start the pattern from the lo index to the hi index.
+				0 = Start the pattern from the hi index to the lo index.
+- qcom,lpg-lut-pattern-repeat:	1 = Repeat the pattern after the pause once it
+				reaches the last duty cycle.
+				0 = Do not repeat the pattern.
+- qcom,lpg-lut-ramp-toggle:	1 = Toggle the direction of the pattern.
+				0 = Do not toggle the direction.
+- qcom,lpg-lut-enable-pause-hi:	1 = Enable pause time at hi index.
+				0 = Disable pause time at hi index.
+- qcom,lpg-lut-enable-pause-lo:	1 = Enable pause time at lo index.
+				0 = Disable pause time at lo index.
+
+
+Example:
+        qcom,spmi@fc4c0000 {
+                #address-cells = <1>;
+                #size-cells = <0>;
+
+                qcom,pm8941@1 {
+                        spmi-slave-container;
+                        reg = <0x1>;
+                        #address-cells = <1>;
+                        #size-cells = <1>;
+
+                        pwm@b100 {
+                                #address-cells = <1>;
+                                #size-cells = <1>;
+                                compatible = "qcom,qpnp-pwm";
+                                reg = <0xb100 0x100>,
+                                      <0xb040 0x80>;
+				reg-names = "qpnp-lpg-channel-base", "qpnp-lpg-lut-base";
+                                qcom,channel-id = <0>;
+				status = "okay";
+                        };
+
+                        pwm@b200 {
+                                #address-cells = <1>;
+                                #size-cells = <1>;
+                                compatible = "qcom,qpnp-pwm";
+                                reg = <0xb200 0x100>,
+                                      <0xb040 0x80>;
+				reg-names = "qpnp-lpg-channel-base", "qpnp-lpg-lut-base";
+                                qcom,channel-id = <1>;
+                                qcom,period = <6000000>;
+				status = "okay";
+				qcom,pwm {
+					qcom,duty = <4000000>;
+					label = "pwm";
+				};
+                        };
+
+                        pwm@b500 {
+                                #address-cells = <1>;
+                                #size-cells = <1>;
+                                compatible = "qcom,qpnp-pwm";
+                                reg = <0xb500 0x100>,
+                                      <0xb040 0x80>;
+				reg-names = "qpnp-lpg-channel-base", "qpnp-lpg-lut-base";
+                                qcom,channel-id = <4>;
+                                qcom,period = <6000000>;
+				qcom,mode-select = <0>;
+				qcom,channel-owner = "RGB-led";
+				status = "okay";
+
+				qcom,pwm {
+					qcom,duty = <4000000>;
+					label = "pwm";
+				};
+
+				qcom,lpg {
+					qcom,duty-percents = <1 14 28 42 56 84 100
+							100 84 56 42 28 14 1>;
+					cell-index = <0>;
+					qcom,ramp-step-duration = <20>;
+					label = "lpg";
+				};
+                        };
+                };
+        };
+
+There are a couple of ways to configure PWM device channels, as shown in the
+above example:
+1. PWM device channel #0 is configured with only the required device bindings.
+In this case, the qpnp-pwm driver does not configure any mode by default.
+
+2. The qpnp-pwm driver configures PWM device channel #1 with the PWM feature
+configuration, but does not enable the channel since the "qcom,mode-select"
+binding is not specified in the devicetree.
+
+3. Both the PWM and LPG configurations are provided for PWM device channel #4.
+The qpnp-pwm driver configures both modes, but enables PWM mode at probe
+time. It also sets the channel owner information for the channel.
diff --git a/Documentation/devicetree/bindings/slimbus/slim-msm-ctrl.txt b/Documentation/devicetree/bindings/slimbus/slim-msm-ctrl.txt
index cf727d9..ecac09d 100644
--- a/Documentation/devicetree/bindings/slimbus/slim-msm-ctrl.txt
+++ b/Documentation/devicetree/bindings/slimbus/slim-msm-ctrl.txt
@@ -25,6 +25,9 @@
  - qcom,max-clk-gear: Maximum clock gear at which this controller can be run
 		 (range: 1-10)
 		 Default value will be 10 if this entry is not specified
+ - qcom,rxreg-access: This boolean indicates that slimbus RX should use direct
+		 register access to receive data. This flag is only needed if a
+		 BAM pipe is not available to receive data from slimbus.
 Example:
 	slim@fe12f000 {
 		cell-index = <1>;
@@ -35,4 +38,5 @@
 		interrupts = <0 163 0 0 164 0>;
 		interrupt-names = "slimbus_irq", "slimbus_bam_irq";
 		qcom,min-clk-gear = <10>;
+		qcom,rxreg-access;
 	};
diff --git a/Documentation/devicetree/bindings/usb/msm-hsusb.txt b/Documentation/devicetree/bindings/usb/msm-hsusb.txt
index 95ddf34..0516dff 100644
--- a/Documentation/devicetree/bindings/usb/msm-hsusb.txt
+++ b/Documentation/devicetree/bindings/usb/msm-hsusb.txt
@@ -49,3 +49,70 @@
 		qcom,hsusb-otg-pclk-src-name = "dfab_usb_clk";
 		qcom,hsusb-otg-pmic-id-irq = <47>
 	};
+
+BAM:
+
+Required properties:
+- compatible: should be "qcom,usb-bam-msm"
+- reg: offset and length of the register set in the memory map
+- interrupts: IRQ line
+- qcom,usb-active-bam: active BAM type. Can be one of
+            0 - HSUSB_BAM
+            1 - HSIC_BAM
+- qcom,usb-total-bam-num: total number of BAMs that are supported
+- qcom,usb-bam-num-pipes: max number of pipes that can be used
+- qcom,usb-base-address: physical base address of the BAM
+
+The parameters of each USB BAM pipe are represented as a sub-node:
+
+Required sub-node properties:
+- label: a string describing the pipe's direction and use
+- qcom,usb-bam-type: BAM type. Can be one of
+            0 - HSUSB_BAM
+            1 - HSIC_BAM
+- qcom,src-bam-physical-address: source BAM physical address
+- qcom,src-bam-pipe-index: source BAM pipe index
+- qcom,dst-bam-physical-address: destination BAM physical address
+- qcom,dst-bam-pipe-index: destination BAM pipe index
+- qcom,data-fifo-offset: data fifo offset address
+- qcom,data-fifo-size: data fifo size
+- qcom,descriptor-fifo-offset: descriptor fifo offset address
+- qcom,descriptor-fifo-size: descriptor fifo size
+
+Example USB BAM controller device node:
+
+	qcom,usbbam@f9304000 {
+		compatible = "qcom,usb-bam-msm";
+		reg = <0xf9304000 0x9000>;
+		interrupts = <0 132 0>;
+		qcom,usb-active-bam = <0>;
+		qcom,usb-total-bam-num = <1>;
+		qcom,usb-bam-num-pipes = <16>;
+		qcom,usb-base-address = <0xf9200000>;
+
+		qcom,pipe1 {
+			label = "usb-to-peri-qdss-dwc3";
+			qcom,usb-bam-type = <0>;
+			qcom,src-bam-physical-address = <0>;
+			qcom,src-bam-pipe-index = <0>;
+			qcom,dst-bam-physical-address = <0>;
+			qcom,dst-bam-pipe-index = <0>;
+			qcom,data-fifo-offset = <0>;
+			qcom,data-fifo-size = <0>;
+			qcom,descriptor-fifo-offset = <0>;
+			qcom,descriptor-fifo-size = <0>;
+		};
+
+		qcom,pipe2 {
+			label = "peri-to-usb-qdss-dwc3";
+			qcom,usb-bam-type = <0>;
+			qcom,src-bam-physical-address = <0xfc37C000>;
+			qcom,src-bam-pipe-index = <0>;
+			qcom,dst-bam-physical-address = <0xf9304000>;
+			qcom,dst-bam-pipe-index = <2>;
+			qcom,data-fifo-offset = <0xf0000>;
+			qcom,data-fifo-size = <0x4000>;
+			qcom,descriptor-fifo-offset = <0xf4000>;
+			qcom,descriptor-fifo-size = <0x1400>;
+		};
+	};
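Since each pipe is described by its own sub-node under the BAM controller, a driver
would normally walk the children and read the FIFO parameters from each one. The loop
below is a hedged sketch of that pattern using the generic OF helpers; the function and
variable names are assumptions for illustration.

	#include <linux/of.h>

	static void parse_usb_bam_pipes(struct device_node *bam_node)
	{
		struct device_node *pipe;
		u32 fifo_offset, fifo_size;

		for_each_child_of_node(bam_node, pipe) {
			if (of_property_read_u32(pipe, "qcom,data-fifo-offset",
						 &fifo_offset) ||
			    of_property_read_u32(pipe, "qcom,data-fifo-size",
						 &fifo_size))
				continue;	/* skip incompletely described pipes */

			/* ...store fifo_offset/fifo_size for this pipe... */
		}
	}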
diff --git a/Documentation/usb/misc_ksbridge.txt b/Documentation/usb/misc_ksbridge.txt
new file mode 100644
index 0000000..f409dc1
--- /dev/null
+++ b/Documentation/usb/misc_ksbridge.txt
@@ -0,0 +1,46 @@
+Introduction
+--------------
+ksbridge is a simple misc device which bridges the Kickstart application
+to the HSIC hardware. The driver supports two instances: one instance for
+flash-less boot/ram dumps and the other instance for EFS sync.
+
+Initialization
+--------------
+Two bridge instances are created and registered for USB devices 0x9008
+and 0x9048/0x904C. The misc device name depends on the USB PID.
+For PID 9008, the misc device name is ks_bridge, and for PID 9048/904C,
+the misc device name is efs_bridge. After Kickstart opens the misc
+device, IN URBs are submitted to the hardware; by default the number of
+IN URBs is configured to 20.
+TX PATH
+-------
+The transmit path is simple. The bridge driver exposes a write system
+call to Kickstart. Data from the write call is put into a list, and a
+work item is scheduled to take the data from the list and write it to HSIC.
+
+Functions:
+ksb_fs_write: System call invoked when kickstart writes the data
+ksb_tomdm_work: Work function which submits data to HSIC h/w.
+
+Data Structures:
+to_mdm_list: Data is stored in this list
+to_mdm_work: mapped to ksb_tomdm_work function
+
+RX PATH
+-------
+During initialization, 20 IN URBs are submitted to the HSIC controller. In
+the completion handler of each URB, the buffer is de-queued and added to a
+list, and the read function is woken up. A new buffer is then created and
+submitted to the controller.
+
+Functions:
+ksb_fs_read: system call invoked by ks when it tries to read the data
+ksb_rx_cb: rx urb completion handler
+ksb_start_rx_work: function called during initialization.
+
+Data Structures:
+ks_wait_q: the read system call blocks on this queue until data is
+available or the device is disconnected.
+to_ks_list: data is queued to this list by the RX URB completion handler
+and later de-queued by the read system call.
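The read path described above is a standard producer/consumer arrangement: the URB
completion handler queues buffers on to_ks_list and wakes ks_wait_q, while the read()
call sleeps until data or a disconnect arrives. The fragment below sketches that
blocking wait under assumed structure and field names (locking omitted for brevity);
it is not the driver's actual code.

	#include <linux/list.h>
	#include <linux/wait.h>
	#include <linux/errno.h>

	/* Hypothetical per-bridge context mirroring the structures above. */
	struct ksb_dev {
		wait_queue_head_t ks_wait_q;
		struct list_head to_ks_list;
		bool disconnected;
	};

	/* Block until the RX completion handler has queued data, or the
	 * device is gone, as described in the RX PATH section above. */
	static int ksb_wait_for_data(struct ksb_dev *ksb)
	{
		int ret;

		ret = wait_event_interruptible(ksb->ks_wait_q,
				!list_empty(&ksb->to_ks_list) ||
				ksb->disconnected);
		if (ret)
			return ret;	/* interrupted by a signal */

		return ksb->disconnected ? -ENODEV : 0;
	}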
diff --git a/arch/arm/boot/dts/msm8974-camera.dtsi b/arch/arm/boot/dts/msm8974-camera.dtsi
new file mode 100644
index 0000000..0375e93
--- /dev/null
+++ b/arch/arm/boot/dts/msm8974-camera.dtsi
@@ -0,0 +1,151 @@
+/*
+ * Copyright (c) 2012, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+/include/ "skeleton.dtsi"
+
+/ {
+	qcom,cam_server {
+		compatible = "qcom,cam_server";
+		reg = <0xfd8C0000 0x10000>;
+		reg-names = "server";
+	};
+	qcom,csiphy@fda0ac00 {
+		cell-index = <0>;
+		compatible = "qcom,csiphy";
+		reg = <0xfda0ac00 0x200>;
+		reg-names = "csiphy";
+		interrupts = <0 78 0>;
+		interrupt-names = "csiphy";
+	};
+	qcom,csiphy@fda0b000 {
+		cell-index = <1>;
+		compatible = "qcom,csiphy";
+		reg = <0xfda0b000 0x200>;
+		reg-names = "csiphy";
+		interrupts = <0 79 0>;
+		interrupt-names = "csiphy";
+	};
+	qcom,csiphy@fda0b400 {
+		cell-index = <2>;
+		compatible = "qcom,csiphy";
+		reg = <0xfda0b400 0x200>;
+		reg-names = "csiphy";
+		interrupts = <0 80 0>;
+		interrupt-names = "csiphy";
+	};
+	qcom,csid@fda08000  {
+		cell-index = <0>;
+		compatible = "qcom,csid";
+		reg = <0xfda08000 0x100>;
+		reg-names = "csid";
+		interrupts = <0 51 0>;
+		interrupt-names = "csid";
+	};
+	qcom,csid@fda08400 {
+		cell-index = <1>;
+		compatible = "qcom,csid";
+		reg = <0xfda08400 0x100>;
+		reg-names = "csid";
+		interrupts = <0 52 0>;
+		interrupt-names = "csid";
+	};
+	qcom,csid@fda08800 {
+		cell-index = <2>;
+		compatible = "qcom,csid";
+		reg = <0xfda08800 0x100>;
+		reg-names = "csid";
+		interrupts = <0 53 0>;
+		interrupt-names = "csid";
+	};
+	qcom,csid@fda08C00 {
+		cell-index = <3>;
+		compatible = "qcom,csid";
+		reg = <0xfda08C00 0x100>;
+		reg-names = "csid";
+		interrupts = <0 54 0>;
+		interrupt-names = "csid";
+	};
+	qcom,ispif@fda0A000 {
+		cell-index = <0>;
+		compatible = "qcom,ispif";
+		reg = <0xfda0A000 0x300>;
+		reg-names = "ispif";
+		interrupts = <0 55 0>;
+		interrupt-names = "ispif";
+	};
+	qcom,cci@fda0C000 {
+		cell-index = <0>;
+		compatible = "qcom,cci";
+		reg = <0xfda0C000 0x1000>;
+		reg-names = "cci";
+		interrupts = <0 50 0>;
+		interrupt-names = "cci";
+	};
+	qcom,vfe@fda10000 {
+		cell-index = <0>;
+		compatible = "qcom,vfe40";
+		reg = <0xfda10000 0x1000>;
+		reg-names = "vfe";
+		interrupts = <0 57 0>;
+		interrupt-names = "vfe";
+		vdd-supply = <&gdsc_vfe>;
+	};
+	qcom,vfe@fda14000 {
+		cell-index = <1>;
+		compatible = "qcom,vfe40";
+		reg = <0xfda14000 0x1000>;
+		reg-names = "vfe";
+		interrupts = <0 58 0>;
+		interrupt-names = "vfe";
+		vdd-supply = <&gdsc_vfe>;
+	};
+	qcom,jpeg@fda1c000 {
+		cell-index = <0>;
+		compatible = "qcom,jpeg";
+		reg = <0xfda1c000 0x400>;
+		reg-names = "jpeg";
+		interrupts = <0 59 0>;
+		interrupt-names = "jpeg";
+	};
+	qcom,jpeg@fda20000 {
+		cell-index = <1>;
+		compatible = "qcom,jpeg";
+		reg = <0xfda20000 0x400>;
+		reg-names = "jpeg";
+		interrupts = <0 60 0>;
+		interrupt-names = "jpeg";
+	};
+	qcom,jpeg@fda24000 {
+		cell-index = <2>;
+		compatible = "qcom,jpeg";
+		reg = <0xfda24000 0x400>;
+		reg-names = "jpeg";
+		interrupts = <0 61 0>;
+		interrupt-names = "jpeg";
+	};
+	qcom,irqrouter@fda00000 {
+		cell-index = <0>;
+		compatible = "qcom,irqrouter";
+		reg = <0xfda00000 0x100>;
+		reg-names = "irqrouter";
+	};
+	qcom,cpp@fda04000 {
+		cell-index = <0>;
+		compatible = "qcom,cpp";
+		reg = <0xfda04000 0x100>;
+		reg-names = "cpp";
+		interrupts = <0 49 0>;
+		interrupt-names = "cpp";
+		vdd-supply = <&gdsc_vfe>;
+	};
+};
diff --git a/arch/arm/boot/dts/msm8974-ion.dtsi b/arch/arm/boot/dts/msm8974-ion.dtsi
new file mode 100644
index 0000000..1893ae4
--- /dev/null
+++ b/arch/arm/boot/dts/msm8974-ion.dtsi
@@ -0,0 +1,76 @@
+/* Copyright (c) 2012, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+/ {
+	qcom,ion {
+		compatible = "qcom,msm-ion";
+		#address-cells = <1>;
+		#size-cells = <0>;
+
+		qcom,ion-heap@30 { /* SYSTEM HEAP */
+			reg = <30>;
+		};
+
+		qcom,ion-heap@8 { /* CP_MM HEAP */
+			compatible = "qcom,msm-ion-reserve";
+			reg = <8>;
+			qcom,heap-align = <0x1000>;
+			qcom,memory-reservation-type = "EBI1"; /* reserve EBI memory */
+			qcom,memory-reservation-size = <0x7800000>;
+		};
+
+		qcom,ion-heap@29 { /* FIRMWARE HEAP */
+			compatible = "qcom,msm-ion-reserve";
+			reg = <29>;
+			qcom,heap-align = <0x20000>;
+			qcom,heap-adjacent = <8>;
+			qcom,memory-reservation-type = "EBI1"; /* reserve EBI memory */
+			qcom,memory-reservation-size = <0xA00000>;
+		};
+
+		qcom,ion-heap@12 { /* MFC HEAP */
+			compatible = "qcom,msm-ion-reserve";
+			reg = <12>;
+			qcom,heap-align = <0x1000>;
+			qcom,memory-reservation-type = "EBI1"; /* reserve EBI memory */
+			qcom,memory-reservation-size = <0x2000>;
+		};
+
+		qcom,ion-heap@24 { /* SF HEAP */
+			compatible = "qcom,msm-ion-reserve";
+			reg = <24>;
+			qcom,heap-align = <0x1000>;
+			qcom,memory-reservation-type = "EBI1"; /* reserve EBI memory */
+			qcom,memory-reservation-size = <0x2800000>;
+		};
+
+		qcom,ion-heap@25 { /* IOMMU HEAP */
+			reg = <25>;
+		};
+
+		qcom,ion-heap@27 { /* QSECOM HEAP */
+			compatible = "qcom,msm-ion-reserve";
+			reg = <27>;
+			qcom,heap-align = <0x1000>;
+			qcom,memory-reservation-type = "EBI1"; /* reserve EBI memory */
+			qcom,memory-reservation-size = <0x600000>;
+		};
+
+		qcom,ion-heap@28 { /* AUDIO HEAP */
+			compatible = "qcom,msm-ion-reserve";
+			reg = <28>;
+			qcom,heap-align = <0x1000>;
+			qcom,memory-reservation-type = "EBI1"; /* reserve EBI memory */
+			qcom,memory-reservation-size = <0x2B4000>;
+		};
+	};
+};
diff --git a/arch/arm/boot/dts/msm8974-regulator.dtsi b/arch/arm/boot/dts/msm8974-regulator.dtsi
index b376544..a187223 100644
--- a/arch/arm/boot/dts/msm8974-regulator.dtsi
+++ b/arch/arm/boot/dts/msm8974-regulator.dtsi
@@ -127,9 +127,9 @@
 	rpm-regulator-smpb3 {
 		status = "okay";
 		pm8841_s3: regulator-s3 {
-			regulator-min-microvolt = <1150000>;
-			regulator-max-microvolt = <1150000>;
-			qcom,init-voltage = <1150000>;
+			regulator-min-microvolt = <1050000>;
+			regulator-max-microvolt = <1050000>;
+			qcom,init-voltage = <1050000>;
 			status = "okay";
 		};
 	};
diff --git a/arch/arm/boot/dts/msm8974.dtsi b/arch/arm/boot/dts/msm8974.dtsi
index 12f46a3..f144421 100644
--- a/arch/arm/boot/dts/msm8974.dtsi
+++ b/arch/arm/boot/dts/msm8974.dtsi
@@ -13,7 +13,9 @@
 /include/ "skeleton.dtsi"
 /include/ "msm8974_pm.dtsi"
 /include/ "msm8974-iommu.dtsi"
+/include/ "msm8974-camera.dtsi"
 /include/ "msm-gdsc.dtsi"
+/include/ "msm8974-ion.dtsi"
 /include/ "msm8974-gpu.dtsi"
 
 / {
@@ -214,6 +216,7 @@
 		interrupts = <0 163 0 0 164 0>;
 		interrupt-names = "slimbus_irq", "slimbus_bam_irq";
 		qcom,min-clk-gear = <10>;
+		qcom,rxreg-access;
 	};
 
 	spmi_bus: qcom,spmi@fc4c0000 {
diff --git a/arch/arm/configs/msm7627a_defconfig b/arch/arm/configs/msm7627a_defconfig
index 314f91b..00325c9 100644
--- a/arch/arm/configs/msm7627a_defconfig
+++ b/arch/arm/configs/msm7627a_defconfig
@@ -218,7 +218,6 @@
 # CONFIG_SERIAL_MSM_CLOCK_CONTROL is not set
 CONFIG_DIAG_CHAR=y
 # CONFIG_HW_RANDOM is not set
-CONFIG_DCC_TTY=y
 CONFIG_I2C=y
 CONFIG_I2C_CHARDEV=y
 # CONFIG_I2C_MSM is not set
diff --git a/arch/arm/configs/msm7630_defconfig b/arch/arm/configs/msm7630_defconfig
index aad13b8..5c5a152 100644
--- a/arch/arm/configs/msm7630_defconfig
+++ b/arch/arm/configs/msm7630_defconfig
@@ -238,7 +238,6 @@
 CONFIG_SERIAL_MSM_HS=y
 CONFIG_DIAG_CHAR=y
 # CONFIG_HW_RANDOM is not set
-CONFIG_DCC_TTY=y
 CONFIG_I2C=y
 CONFIG_I2C_CHARDEV=y
 CONFIG_I2C_QUP=y
diff --git a/arch/arm/configs/msm8660_defconfig b/arch/arm/configs/msm8660_defconfig
index 5d19237..0efe658 100644
--- a/arch/arm/configs/msm8660_defconfig
+++ b/arch/arm/configs/msm8660_defconfig
@@ -282,7 +282,6 @@
 CONFIG_DIAG_CHAR=y
 CONFIG_HW_RANDOM=y
 CONFIG_HW_RANDOM_MSM=y
-CONFIG_DCC_TTY=y
 CONFIG_I2C=y
 CONFIG_I2C_CHARDEV=y
 # CONFIG_I2C_MSM is not set
diff --git a/arch/arm/configs/msm8960-perf_defconfig b/arch/arm/configs/msm8960-perf_defconfig
index 795a5a7..1f2e285 100644
--- a/arch/arm/configs/msm8960-perf_defconfig
+++ b/arch/arm/configs/msm8960-perf_defconfig
@@ -233,6 +233,9 @@
 CONFIG_BT_BNEP_PROTO_FILTER=y
 CONFIG_BT_HIDP=y
 CONFIG_BT_HCISMD=y
+CONFIG_BT_HCIUART=y
+CONFIG_BT_HCIUART_ATH3K=y
+CONFIG_MSM_BT_POWER=y
 CONFIG_CFG80211=m
 # CONFIG_CFG80211_WEXT is not set
 CONFIG_RFKILL=y
diff --git a/arch/arm/configs/msm8960_defconfig b/arch/arm/configs/msm8960_defconfig
index 4674584..8c79847 100644
--- a/arch/arm/configs/msm8960_defconfig
+++ b/arch/arm/configs/msm8960_defconfig
@@ -237,6 +237,9 @@
 CONFIG_BT_BNEP_PROTO_FILTER=y
 CONFIG_BT_HIDP=y
 CONFIG_BT_HCISMD=y
+CONFIG_BT_HCIUART=y
+CONFIG_BT_HCIUART_ATH3K=y
+CONFIG_MSM_BT_POWER=y
 CONFIG_CFG80211=m
 # CONFIG_CFG80211_WEXT is not set
 CONFIG_RFKILL=y
diff --git a/arch/arm/configs/msm8974_defconfig b/arch/arm/configs/msm8974_defconfig
index f3a62a5..6f4db42 100644
--- a/arch/arm/configs/msm8974_defconfig
+++ b/arch/arm/configs/msm8974_defconfig
@@ -52,6 +52,10 @@
 CONFIG_MSM_DIRECT_SCLK_ACCESS=y
 CONFIG_MSM_OCMEM=y
 CONFIG_MSM_MEMORY_DUMP=y
+CONFIG_MSM_CACHE_ERP=y
+CONFIG_MSM_L1_ERR_PANIC=y
+CONFIG_MSM_L2_ERP_PRINT_ACCESS_ERRORS=y
+CONFIG_MSM_L2_ERP_2BIT_PANIC=y
 CONFIG_NO_HZ=y
 CONFIG_HIGH_RES_TIMERS=y
 CONFIG_SMP=y
@@ -160,6 +164,7 @@
 # CONFIG_RADIO_ADAPTERS is not set
 CONFIG_ION=y
 CONFIG_ION_MSM=y
+CONFIG_MSM_KGSL=y
 CONFIG_FB=y
 CONFIG_FB_MSM=y
 # CONFIG_FB_MSM_BACKLIGHT is not set
@@ -239,8 +244,6 @@
 CONFIG_DEBUG_MEMORY_INIT=y
 CONFIG_DYNAMIC_DEBUG=y
 CONFIG_DEBUG_USER=y
-CONFIG_DEBUG_LL=y
-CONFIG_EARLY_PRINTK=y
 CONFIG_KEYS=y
 CONFIG_CRYPTO_MD4=y
 CONFIG_CRYPTO_SHA256=y
diff --git a/arch/arm/include/asm/spinlock.h b/arch/arm/include/asm/spinlock.h
index 94aa75e..bc81696 100644
--- a/arch/arm/include/asm/spinlock.h
+++ b/arch/arm/include/asm/spinlock.h
@@ -37,9 +37,11 @@
 #endif
 
 /*
- * The fixup involves disabling interrupts during execution of the WFE
- * instruction. This could potentially lead to deadlock if a thread is trying
- * to acquire a spinlock which is being released from an interrupt context.
+ * The fixup involves disabling FIQs during execution of the WFE instruction.
+ * This could potentially lead to deadlock if a thread is trying to acquire a
+ * spinlock which is being released from an FIQ. This should not be a problem
+ * because FIQs are handled by the secure environment and do not directly
+ * manipulate spinlocks.
  */
 #ifdef CONFIG_MSM_KRAIT_WFE_FIXUP
 #define WFE_SAFE(fixup, tmp) 				\
@@ -47,7 +49,7 @@
 "	cmp	" fixup ", #0\n"			\
 "	wfeeq\n"					\
 "	beq	10f\n"					\
-"	cpsid	if\n"					\
+"	cpsid   f\n"					\
 "	mrc	p15, 7, " fixup ", c15, c0, 5\n"	\
 "	bic	" fixup ", " fixup ", #0x10000\n"	\
 "	mcr	p15, 7, " fixup ", c15, c0, 5\n"	\
diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S
index 7c44acd..7a8c2d6 100644
--- a/arch/arm/kernel/entry-armv.S
+++ b/arch/arm/kernel/entry-armv.S
@@ -966,7 +966,7 @@
  * SP points to a minimal amount of processor-private memory, the address
  * of which is copied into r0 for the mode specific abort handler.
  */
-	.macro	vector_stub, name, mode, correction=0
+	.macro	vector_stub, name, mode, fixup, correction=0
 	.align	5
 
 vector_\name:
@@ -995,6 +995,18 @@
 	and	lr, lr, #0x0f
  THUMB(	adr	r0, 1f			)
  THUMB(	ldr	lr, [r0, lr, lsl #2]	)
+	.if	\fixup
+#ifdef CONFIG_MSM_KRAIT_WFE_FIXUP
+	ldr	r0, .krait_fixup
+	ldr	r0, [r0]
+	cmp	r0, #0
+	beq	10f
+	mrc	p15, 7, r0, c15, c0, 5
+	orr	r0, r0, #0x10000
+	mcr	p15, 7, r0, c15, c0, 5
+10:	isb
+#endif
+	.endif
 	mov	r0, sp
  ARM(	ldr	lr, [pc, lr, lsl #2]	)
 	movs	pc, lr			@ branch to handler in SVC mode
@@ -1010,7 +1022,7 @@
 /*
  * Interrupt dispatcher
  */
-	vector_stub	irq, IRQ_MODE, 4
+	vector_stub	irq, IRQ_MODE, 1, 4
 
 	.long	__irq_usr			@  0  (USR_26 / USR_32)
 	.long	__irq_invalid			@  1  (FIQ_26 / FIQ_32)
@@ -1033,7 +1045,7 @@
  * Data abort dispatcher
  * Enter in ABT mode, spsr = USR CPSR, lr = USR PC
  */
-	vector_stub	dabt, ABT_MODE, 8
+	vector_stub	dabt, ABT_MODE, 0, 8
 
 	.long	__dabt_usr			@  0  (USR_26 / USR_32)
 	.long	__dabt_invalid			@  1  (FIQ_26 / FIQ_32)
@@ -1056,7 +1068,7 @@
  * Prefetch abort dispatcher
  * Enter in ABT mode, spsr = USR CPSR, lr = USR PC
  */
-	vector_stub	pabt, ABT_MODE, 4
+	vector_stub	pabt, ABT_MODE, 0, 4
 
 	.long	__pabt_usr			@  0 (USR_26 / USR_32)
 	.long	__pabt_invalid			@  1 (FIQ_26 / FIQ_32)
@@ -1079,7 +1091,7 @@
  * Undef instr entry dispatcher
  * Enter in UND mode, spsr = SVC/USR CPSR, lr = SVC/USR PC
  */
-	vector_stub	und, UND_MODE
+	vector_stub	und, UND_MODE, 0
 
 	.long	__und_usr			@  0 (USR_26 / USR_32)
 	.long	__und_invalid			@  1 (FIQ_26 / FIQ_32)
@@ -1131,6 +1143,8 @@
 
 .LCvswi:
 	.word	vector_swi
+.krait_fixup:
+	.word	msm_krait_need_wfe_fixup
 
 	.globl	__stubs_end
 __stubs_end:
diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
index 28d6e60..ca3e996 100644
--- a/arch/arm/kernel/smp.c
+++ b/arch/arm/kernel/smp.c
@@ -651,7 +651,8 @@
 
 	cpumask_copy(&mask, cpu_online_mask);
 	cpumask_clear_cpu(smp_processor_id(), &mask);
-	smp_cross_call(&mask, IPI_CPU_STOP);
+	if (!cpumask_empty(&mask))
+		smp_cross_call(&mask, IPI_CPU_STOP);
 
 	/* Wait up to one second for other CPUs to stop */
 	timeout = USEC_PER_SEC;
diff --git a/arch/arm/mach-msm/Kconfig b/arch/arm/mach-msm/Kconfig
index 54f3292..1665abd 100644
--- a/arch/arm/mach-msm/Kconfig
+++ b/arch/arm/mach-msm/Kconfig
@@ -2298,6 +2298,33 @@
 	  Enable support for On-Chip Memory available on certain MSM chipsets.
 	  OCMEM is a low latency, high performance pool shared by subsystems.
 
+config MSM_OCMEM_LOCAL_POWER_CTRL
+	bool "OCMEM Local Power Control"
+	depends on MSM_OCMEM
+	help
+	  Enable direct power management of the OCMEM core by the
+	  OCMEM driver. By default power management is delegated to
+	  the RPM. Selecting this option causes the OCMEM driver to
+	  directly handle the various macro power transitions.
+
+config MSM_OCMEM_DEBUG
+	bool "OCMEM Debug Support"
+	depends on MSM_OCMEM
+	help
+	  Enable debug options for On-chip Memory (OCMEM) driver.
+	  Various debug options include memory, power and latency.
+	  Choosing one of these options allows debugging of each
+	  individual subsystem separately.
+
+config MSM_OCMEM_POWER_DEBUG
+	bool "OCMEM Power Debug Support"
+	depends on MSM_OCMEM_DEBUG
+	help
+	  Enable debug support for OCMEM power management.
+	  This adds support for verifying all power management
+	  related operations of OCMEM. Both local power management
+	  and RPM assisted power management operations are supported.
+
 config MSM_RTB
 	bool "Register tracing"
 	help
diff --git a/arch/arm/mach-msm/Makefile b/arch/arm/mach-msm/Makefile
index 33b153f..b96ccec 100644
--- a/arch/arm/mach-msm/Makefile
+++ b/arch/arm/mach-msm/Makefile
@@ -251,7 +251,7 @@
 obj-$(CONFIG_MACH_MSM7627A_QRD1) += board-qrd7627a.o board-7627a-all.o
 obj-$(CONFIG_MACH_MSM7627A_QRD3) += board-qrd7627a.o board-7627a-all.o
 obj-$(CONFIG_MACH_MSM7627A_EVB) += board-qrd7627a.o board-7627a-all.o
-obj-$(CONFIG_ARCH_MSM8625) += devices-msm7x27a.o clock-pcom-lookup.o mpm-8625.o
+obj-$(CONFIG_ARCH_MSM8625) += msm_smem_iface.o devices-msm7x27a.o clock-pcom-lookup.o mpm-8625.o
 obj-$(CONFIG_MACH_MSM8625_RUMI3) += board-msm7x27a.o
 obj-$(CONFIG_MACH_MSM8625_SURF) +=  board-msm7x27a.o board-7627a-all.o
 obj-$(CONFIG_MACH_MSM8625_EVB) +=  board-qrd7627a.o board-7627a-all.o
@@ -336,7 +336,7 @@
 obj-$(CONFIG_ARCH_MSM8X60) += board-msm8x60-vcm.o
 endif
 obj-$(CONFIG_MSM_OCMEM) += ocmem.o ocmem_allocator.o ocmem_notifier.o
-obj-$(CONFIG_MSM_OCMEM) += ocmem_sched.o ocmem_api.o ocmem_rdm.o
+obj-$(CONFIG_MSM_OCMEM) += ocmem_sched.o ocmem_api.o ocmem_rdm.o ocmem_core.o
 
 obj-$(CONFIG_ARCH_MSM7X27) += gpiomux-7x27.o gpiomux-v1.o gpiomux.o
 obj-$(CONFIG_ARCH_MSM7X30) += gpiomux-7x30.o gpiomux-v1.o gpiomux.o
diff --git a/arch/arm/mach-msm/acpuclock-7627.c b/arch/arm/mach-msm/acpuclock-7627.c
index 639cc94..09a1be7 100644
--- a/arch/arm/mach-msm/acpuclock-7627.c
+++ b/arch/arm/mach-msm/acpuclock-7627.c
@@ -249,14 +249,16 @@
 /* 8625 PLL4 @ 1209MHz with GSM capable modem */
 static struct clkctl_acpu_speed pll0_960_pll1_245_pll2_1200_pll4_1209[] = {
 	{ 0, 19200, ACPU_PLL_TCXO, 0, 0, 2400, 3, 0, 30720 },
-	{ 0, 61440, ACPU_PLL_1, 1, 3,  7680, 3, 1, 61440 },
-	{ 1, 122880, ACPU_PLL_1, 1, 1,  15360, 3, 2, 61440 },
-	{ 1, 245760, ACPU_PLL_1, 1, 0, 30720, 3, 3, 61440 },
-	{ 1, 320000, ACPU_PLL_0, 4, 2, 40000, 3, 4, 122880 },
-	{ 1, 480000, ACPU_PLL_0, 4, 1, 60000, 3, 5, 122880 },
-	{ 1, 600000, ACPU_PLL_2, 2, 1, 75000, 3, 6, 160000 },
-	{ 0, 604800, ACPU_PLL_4, 6, 1, 75600, 3, 6, 160000 },
-	{ 1, 1209600, ACPU_PLL_4, 6, 0, 151200, 3, 7, 200000},
+	{ 0, 61440, ACPU_PLL_1, 1, 3,  7680, 3, 0, 61440 },
+	{ 0, 122880, ACPU_PLL_1, 1, 1,  15360, 3, 1, 61440 },
+	{ 1, 245760, ACPU_PLL_1, 1, 0, 30720, 3, 1, 61440 },
+	{ 0, 300000, ACPU_PLL_2, 2, 3, 37500, 3, 2, 122880 },
+	{ 1, 320000, ACPU_PLL_0, 4, 2, 40000, 3, 2, 122880 },
+	{ 1, 480000, ACPU_PLL_0, 4, 1, 60000, 3, 3, 122880 },
+	{ 0, 600000, ACPU_PLL_2, 2, 1, 75000, 3, 4, 160000 },
+	{ 1, 700800, ACPU_PLL_4, 6, 0, 87500, 3, 4, 160000, &pll4_cfg_tbl[0]},
+	{ 1, 1008000, ACPU_PLL_4, 6, 0, 126000, 3, 5, 200000, &pll4_cfg_tbl[1]},
+	{ 1, 1209600, ACPU_PLL_4, 6, 0, 151200, 3, 6, 200000, &pll4_cfg_tbl[2]},
 	{ 0 }
 };
 
@@ -264,13 +266,14 @@
 static struct clkctl_acpu_speed pll0_960_pll1_196_pll2_1200_pll4_1209[] = {
 	{ 0, 19200, ACPU_PLL_TCXO, 0, 0, 2400, 3, 0, 24576 },
 	{ 0, 65536, ACPU_PLL_1, 1, 3,  8192, 3, 1, 49152 },
-	{ 1, 98304, ACPU_PLL_1, 1, 1,  12288, 3, 2, 49152 },
+	{ 0, 98304, ACPU_PLL_1, 1, 1,  12288, 3, 2, 49152 },
 	{ 1, 196608, ACPU_PLL_1, 1, 0, 24576, 3, 3, 98304 },
-	{ 1, 320000, ACPU_PLL_0, 4, 2, 40000, 3, 4, 122880 },
-	{ 1, 480000, ACPU_PLL_0, 4, 1, 60000, 3, 5, 122880 },
-	{ 1, 600000, ACPU_PLL_2, 2, 1, 75000, 3, 6, 160000 },
-	{ 0, 604800, ACPU_PLL_4, 6, 1, 75600, 3, 6, 160000 },
-	{ 1, 1209600, ACPU_PLL_4, 6, 0, 151200, 3, 7, 200000},
+	{ 1, 320000, ACPU_PLL_0, 4, 2, 40000, 3, 2, 122880 },
+	{ 1, 480000, ACPU_PLL_0, 4, 1, 60000, 3, 3, 122880 },
+	{ 0, 600000, ACPU_PLL_2, 2, 1, 75000, 3, 4, 160000 },
+	{ 1, 700800, ACPU_PLL_4, 6, 0, 87500, 3, 4, 160000, &pll4_cfg_tbl[0]},
+	{ 1, 1008000, ACPU_PLL_4, 6, 0, 126000, 3, 5, 200000, &pll4_cfg_tbl[1]},
+	{ 1, 1209600, ACPU_PLL_4, 6, 0, 151200, 3, 6, 200000, &pll4_cfg_tbl[2]},
 	{ 0 }
 };
 
@@ -725,7 +728,7 @@
 		if ((delta > drv_state.max_speed_delta_khz)
 				|| (strt_s->pll == ACPU_PLL_4 &&
 					tgt_s->pll == ACPU_PLL_4))
-			clk_disable_unprepare(pll_clk[backup_s->pll].clk);
+			clk_disable(pll_clk[backup_s->pll].clk);
 
 		goto done;
 	}
@@ -959,17 +962,31 @@
 		}
 	}
 
+	if (acpu_freq_tbl == NULL) {
+		pr_crit("Unknown PLL configuration!\n");
+		BUG();
+	}
+
 	/*
-	 * When PLL4 can run max @ 1401.6MHz, we have to support
-	 * dynamic reprograming of PLL4.
-	 *
+	 * Turn ON the dynamic reprogramming method
+	 * if one of the table entries has pll_rate defined.
+	 */
+	for ( ; t->tbl->a11clk_khz; t->tbl++) {
+		if (t->tbl->pll_rate) {
+			if (!dynamic_reprogram) {
+				dynamic_reprogram = 1;
+				pr_info("Dynamic reprogramming is ON\n");
+			}
+		}
+	}
+
+	/*
 	 * Also find the backup pll used during PLL4 reprogramming.
 	 * We are using PLL2@600MHz as backup PLL, since 800MHz jump
 	 * is fine.
 	 */
-	if (t->pll4_rate == 1401) {
-		dynamic_reprogram = 1;
-		for ( ; t->tbl->a11clk_khz; t->tbl++) {
+	if (dynamic_reprogram) {
+		for (t->tbl = acpu_freq_tbl; t->tbl->a11clk_khz; t->tbl++) {
 			if (t->tbl->pll == ACPU_PLL_2 &&
 					t->tbl->a11clk_src_div == 1) {
 				backup_s = t->tbl;
@@ -977,11 +994,6 @@
 			}
 		}
 	}
-
-	if (acpu_freq_tbl == NULL) {
-		pr_crit("Unknown PLL configuration!\n");
-		BUG();
-	}
 }
 
 /*
diff --git a/arch/arm/mach-msm/acpuclock-8974.c b/arch/arm/mach-msm/acpuclock-8974.c
index 8c89014..22275b4 100644
--- a/arch/arm/mach-msm/acpuclock-8974.c
+++ b/arch/arm/mach-msm/acpuclock-8974.c
@@ -23,7 +23,7 @@
 #include "acpuclock-krait.h"
 
 /* Corner type vreg VDD values */
-#define LVL_NONE	RPM_REGULATOR_CORNER_RETENTION
+#define LVL_NONE	RPM_REGULATOR_CORNER_NONE
 #define LVL_LOW		RPM_REGULATOR_CORNER_SVS_SOC
 #define LVL_NOM		RPM_REGULATOR_CORNER_NORMAL
 #define LVL_HIGH	RPM_REGULATOR_CORNER_SUPER_TURBO
@@ -94,11 +94,10 @@
 };
 
 static struct msm_bus_paths bw_level_tbl[] __initdata = {
-	[0] =  BW_MBPS(400), /* At least  50 MHz on bus. */
-	[1] =  BW_MBPS(800), /* At least 100 MHz on bus. */
-	[2] = BW_MBPS(1334), /* At least 167 MHz on bus. */
-	[3] = BW_MBPS(2666), /* At least 200 MHz on bus. */
-	[4] = BW_MBPS(3200), /* At least 333 MHz on bus. */
+	[0] =  BW_MBPS(552), /* At least  69 MHz on bus. */
+	[1] = BW_MBPS(1112), /* At least 139 MHz on bus. */
+	[2] = BW_MBPS(2224), /* At least 278 MHz on bus. */
+	[3] = BW_MBPS(4448), /* At least 556 MHz on bus. */
 };
 
 static struct msm_bus_scale_pdata bus_scale_data __initdata = {
@@ -109,31 +108,59 @@
 };
 
 static struct l2_level l2_freq_tbl[] __initdata = {
-	[0]  = { {  300000, PLL_0, 0, 2,   0 }, LVL_LOW, 1050000, 2 },
-	[1]  = { {  384000, HFPLL, 2, 0,  40 }, LVL_NOM, 1050000, 2 },
-	[2]  = { {  460800, HFPLL, 2, 0,  48 }, LVL_NOM, 1050000, 2 },
-	[3]  = { {  537600, HFPLL, 1, 0,  28 }, LVL_NOM, 1050000, 2 },
-	[4]  = { {  576000, HFPLL, 1, 0,  30 }, LVL_NOM, 1050000, 3 },
-	[5]  = { {  652800, HFPLL, 1, 0,  34 }, LVL_NOM, 1050000, 3 },
-	[6]  = { {  729600, HFPLL, 1, 0,  38 }, LVL_NOM, 1050000, 3 },
-	[7]  = { {  806400, HFPLL, 1, 0,  42 }, LVL_NOM, 1050000, 3 },
-	[8]  = { {  883200, HFPLL, 1, 0,  46 }, LVL_NOM, 1050000, 4 },
-	[9]  = { {  960000, HFPLL, 1, 0,  50 }, LVL_NOM, 1050000, 4 },
-	[10] = { { 1036800, HFPLL, 1, 0,  54 }, LVL_NOM, 1050000, 4 },
+	[0]  = { {  300000, PLL_0, 0, 2,   0 }, LVL_LOW,   950000, 0 },
+	[1]  = { {  384000, HFPLL, 2, 0,  40 }, LVL_NOM,   950000, 1 },
+	[2]  = { {  460800, HFPLL, 2, 0,  48 }, LVL_NOM,   950000, 1 },
+	[3]  = { {  537600, HFPLL, 1, 0,  28 }, LVL_NOM,   950000, 2 },
+	[4]  = { {  576000, HFPLL, 1, 0,  30 }, LVL_NOM,   950000, 2 },
+	[5]  = { {  652800, HFPLL, 1, 0,  34 }, LVL_NOM,   950000, 2 },
+	[6]  = { {  729600, HFPLL, 1, 0,  38 }, LVL_NOM,   950000, 2 },
+	[7]  = { {  806400, HFPLL, 1, 0,  42 }, LVL_NOM,   950000, 2 },
+	[8]  = { {  883200, HFPLL, 1, 0,  46 }, LVL_HIGH, 1050000, 2 },
+	[9]  = { {  960000, HFPLL, 1, 0,  50 }, LVL_HIGH, 1050000, 2 },
+	[10] = { { 1036800, HFPLL, 1, 0,  54 }, LVL_HIGH, 1050000, 3 },
+	[11] = { { 1113600, HFPLL, 1, 0,  58 }, LVL_HIGH, 1050000, 3 },
+	[12] = { { 1190400, HFPLL, 1, 0,  62 }, LVL_HIGH, 1050000, 3 },
+	[13] = { { 1267200, HFPLL, 1, 0,  66 }, LVL_HIGH, 1050000, 3 },
+	[14] = { { 1344000, HFPLL, 1, 0,  70 }, LVL_HIGH, 1050000, 3 },
+	[15] = { { 1420800, HFPLL, 1, 0,  74 }, LVL_HIGH, 1050000, 3 },
+	[16] = { { 1497600, HFPLL, 1, 0,  78 }, LVL_HIGH, 1050000, 3 },
+	[17] = { { 1574400, HFPLL, 1, 0,  82 }, LVL_HIGH, 1050000, 3 },
+	[18] = { { 1651200, HFPLL, 1, 0,  86 }, LVL_HIGH, 1050000, 3 },
+	[19] = { { 1728000, HFPLL, 1, 0,  90 }, LVL_HIGH, 1050000, 3 },
+	[20] = { { 1804800, HFPLL, 1, 0,  94 }, LVL_HIGH, 1050000, 3 },
+	[21] = { { 1881600, HFPLL, 1, 0,  98 }, LVL_HIGH, 1050000, 3 },
+	[22] = { { 1958400, HFPLL, 1, 0, 102 }, LVL_HIGH, 1050000, 3 },
+	[23] = { { 2035200, HFPLL, 1, 0, 106 }, LVL_HIGH, 1050000, 3 },
+	[24] = { { 2112000, HFPLL, 1, 0, 110 }, LVL_HIGH, 1050000, 3 },
+	[25] = { { 2188800, HFPLL, 1, 0, 114 }, LVL_HIGH, 1050000, 3 },
 };
 
 static struct acpu_level acpu_freq_tbl[] __initdata = {
-	{ 1, {  300000, PLL_0, 0, 2,   0 }, L2(0),  1050000, 3200000 },
-	{ 1, {  384000, HFPLL, 2, 0,  40 }, L2(1),  1050000, 3200000 },
-	{ 1, {  460800, HFPLL, 2, 0,  48 }, L2(2),  1050000, 3200000 },
-	{ 1, {  537600, HFPLL, 1, 0,  28 }, L2(3),  1050000, 3200000 },
-	{ 1, {  576000, HFPLL, 1, 0,  30 }, L2(4),  1050000, 3200000 },
-	{ 1, {  652800, HFPLL, 1, 0,  34 }, L2(5),  1050000, 3200000 },
-	{ 1, {  729600, HFPLL, 1, 0,  38 }, L2(6),  1050000, 3200000 },
-	{ 1, {  806400, HFPLL, 1, 0,  42 }, L2(7),  1050000, 3200000 },
-	{ 1, {  883200, HFPLL, 1, 0,  46 }, L2(8),  1050000, 3200000 },
-	{ 1, {  960000, HFPLL, 1, 0,  50 }, L2(9),  1050000, 3200000 },
-	{ 1, { 1036800, HFPLL, 1, 0,  54 }, L2(10), 1050000, 3200000 },
+	{ 1, {  300000, PLL_0, 0, 2,   0 }, L2(0),   950000, 3200000 },
+	{ 1, {  384000, HFPLL, 2, 0,  40 }, L2(3),   950000, 3200000 },
+	{ 1, {  460800, HFPLL, 2, 0,  48 }, L2(3),   950000, 3200000 },
+	{ 1, {  537600, HFPLL, 1, 0,  28 }, L2(5),   950000, 3200000 },
+	{ 1, {  576000, HFPLL, 1, 0,  30 }, L2(5),   950000, 3200000 },
+	{ 1, {  652800, HFPLL, 1, 0,  34 }, L2(5),   950000, 3200000 },
+	{ 1, {  729600, HFPLL, 1, 0,  38 }, L2(5),   950000, 3200000 },
+	{ 1, {  806400, HFPLL, 1, 0,  42 }, L2(7),   950000, 3200000 },
+	{ 1, {  883200, HFPLL, 1, 0,  46 }, L2(7),   950000, 3200000 },
+	{ 1, {  960000, HFPLL, 1, 0,  50 }, L2(7),   950000, 3200000 },
+	{ 1, { 1036800, HFPLL, 1, 0,  54 }, L2(7),   950000, 3200000 },
+	{ 0, { 1113600, HFPLL, 1, 0,  58 }, L2(12), 1050000, 3200000 },
+	{ 0, { 1190400, HFPLL, 1, 0,  62 }, L2(12), 1050000, 3200000 },
+	{ 0, { 1267200, HFPLL, 1, 0,  66 }, L2(12), 1050000, 3200000 },
+	{ 0, { 1344000, HFPLL, 1, 0,  70 }, L2(15), 1050000, 3200000 },
+	{ 0, { 1420800, HFPLL, 1, 0,  74 }, L2(15), 1050000, 3200000 },
+	{ 0, { 1497600, HFPLL, 1, 0,  78 }, L2(15), 1050000, 3200000 },
+	{ 0, { 1574400, HFPLL, 1, 0,  82 }, L2(20), 1050000, 3200000 },
+	{ 0, { 1651200, HFPLL, 1, 0,  86 }, L2(20), 1050000, 3200000 },
+	{ 0, { 1728000, HFPLL, 1, 0,  90 }, L2(20), 1050000, 3200000 },
+	{ 0, { 1804800, HFPLL, 1, 0,  94 }, L2(25), 1050000, 3200000 },
+	{ 0, { 1881600, HFPLL, 1, 0,  98 }, L2(25), 1050000, 3200000 },
+	{ 0, { 1958400, HFPLL, 1, 0, 102 }, L2(25), 1050000, 3200000 },
+	{ 0, { 1996800, HFPLL, 1, 0, 104 }, L2(25), 1050000, 3200000 },
 	{ 0, { 0 } }
 };
 
diff --git a/arch/arm/mach-msm/board-8064-gpiomux.c b/arch/arm/mach-msm/board-8064-gpiomux.c
index 1c19442..7898cf6 100644
--- a/arch/arm/mach-msm/board-8064-gpiomux.c
+++ b/arch/arm/mach-msm/board-8064-gpiomux.c
@@ -788,7 +788,7 @@
 static struct gpiomux_setting mdm2ap_status_cfg = {
 	.func = GPIOMUX_FUNC_GPIO,
 	.drv = GPIOMUX_DRV_8MA,
-	.pull = GPIOMUX_PULL_NONE,
+	.pull = GPIOMUX_PULL_DOWN,
 };
 
 static struct gpiomux_setting mdm2ap_errfatal_cfg = {
@@ -828,6 +828,7 @@
 	{
 		.gpio = 49,
 		.settings = {
+			[GPIOMUX_ACTIVE] = &mdm2ap_status_cfg,
 			[GPIOMUX_SUSPENDED] = &mdm2ap_status_cfg,
 		}
 	},
diff --git a/arch/arm/mach-msm/board-8064-pmic.c b/arch/arm/mach-msm/board-8064-pmic.c
index 879434d..e77e7c0 100644
--- a/arch/arm/mach-msm/board-8064-pmic.c
+++ b/arch/arm/mach-msm/board-8064-pmic.c
@@ -141,6 +141,12 @@
 	PM8921_GPIO_INPUT(17, PM_GPIO_PULL_UP_1P5),	/* SD_WP */
 };
 
+static struct pm8xxx_gpio_init pm8921_mpq_gpios[] __initdata = {
+	PM8921_GPIO_INIT(27, PM_GPIO_DIR_IN, PM_GPIO_OUT_BUF_CMOS, 0,
+			PM_GPIO_PULL_NO, PM_GPIO_VIN_VPH, PM_GPIO_STRENGTH_NO,
+			PM_GPIO_FUNC_NORMAL, 0, 0),
+};
+
 /* Initial PM8XXX MPP configurations */
 static struct pm8xxx_mpp_init pm8xxx_mpps[] __initdata = {
 	PM8921_MPP_INIT(3, D_OUTPUT, PM8921_MPP_DIG_LEVEL_VPH, DOUT_CTRL_LOW),
@@ -186,6 +192,18 @@
 			}
 		}
 
+	if (machine_is_mpq8064_cdp() || machine_is_mpq8064_hrd()
+					|| machine_is_mpq8064_dtv())
+		for (i = 0; i < ARRAY_SIZE(pm8921_mpq_gpios); i++) {
+			rc = pm8xxx_gpio_config(pm8921_mpq_gpios[i].gpio,
+						&pm8921_mpq_gpios[i].config);
+			if (rc) {
+				pr_err("%s: pm8xxx_gpio_config: rc=%d\n",
+					__func__, rc);
+				break;
+			}
+		}
+
 	for (i = 0; i < ARRAY_SIZE(pm8xxx_mpps); i++) {
 		rc = pm8xxx_mpp_config(pm8xxx_mpps[i].mpp,
 					&pm8xxx_mpps[i].config);
diff --git a/arch/arm/mach-msm/board-8064.c b/arch/arm/mach-msm/board-8064.c
index 90563ad..da849c8 100644
--- a/arch/arm/mach-msm/board-8064.c
+++ b/arch/arm/mach-msm/board-8064.c
@@ -888,6 +888,8 @@
 	-1
 };
 
+#define PMIC_GPIO_DP		27    /* PMIC GPIO for D+ change */
+#define PMIC_GPIO_DP_IRQ	PM8921_GPIO_IRQ(PM8921_IRQ_BASE, PMIC_GPIO_DP)
 static struct msm_otg_platform_data msm_otg_pdata = {
 	.mode			= USB_OTG,
 	.otg_control		= OTG_PMIC_CONTROL,
@@ -913,6 +915,9 @@
 		if (machine_is_apq8064_liquid())
 			msm_ehci_host_pdata3.dock_connect_irq =
 					PM8921_MPP_IRQ(PM8921_IRQ_BASE, 9);
+		else
+			msm_ehci_host_pdata3.pmic_gpio_dp_irq =
+							PMIC_GPIO_DP_IRQ;
 
 		apq8064_device_ehci_host3.dev.platform_data =
 				&msm_ehci_host_pdata3;
@@ -1727,6 +1732,12 @@
 	.mdm2ap_vddmin_gpio = 80,
 };
 
+static struct gpiomux_setting mdm2ap_status_gpio_run_cfg = {
+	.func = GPIOMUX_FUNC_GPIO,
+	.drv = GPIOMUX_DRV_8MA,
+	.pull = GPIOMUX_PULL_NONE,
+};
+
 static struct mdm_platform_data mdm_platform_data = {
 	.mdm_version = "3.0",
 	.ramdump_delay_ms = 2000,
@@ -1735,6 +1746,7 @@
 	.vddmin_resource = &mdm_vddmin_rscs,
 	.peripheral_platform_device = &apq8064_device_hsic_host,
 	.ramdump_timeout_ms = 120000,
+	.mdm2ap_status_gpio_run_cfg = &mdm2ap_status_gpio_run_cfg,
 };
 
 static struct tsens_platform_data apq_tsens_pdata  = {
@@ -2252,7 +2264,6 @@
 	&msm_bus_8064_cpss_fpb,
 	&apq8064_msm_device_vidc,
 	&msm_pil_dsps,
-	&msm_8960_riva,
 	&msm_8960_q6_lpass,
 	&msm_pil_vidc,
 	&msm_gss,
@@ -2942,7 +2953,10 @@
 	platform_device_register(&apq8064_slim_ctrl);
 	slim_register_board_info(apq8064_slim_devices,
 		ARRAY_SIZE(apq8064_slim_devices));
-	apq8064_init_dsps();
+	if (!PLATFORM_IS_MPQ8064()) {
+		apq8064_init_dsps();
+		platform_device_register(&msm_8960_riva);
+	}
 	msm_spm_init(msm_spm_data, ARRAY_SIZE(msm_spm_data));
 	msm_spm_l2_init(msm_spm_l2_data);
 	BUG_ON(msm_pm_boot_init(&msm_pm_boot_pdata));
diff --git a/arch/arm/mach-msm/board-8960-gpiomux.c b/arch/arm/mach-msm/board-8960-gpiomux.c
index 67be99a..1771bb9 100644
--- a/arch/arm/mach-msm/board-8960-gpiomux.c
+++ b/arch/arm/mach-msm/board-8960-gpiomux.c
@@ -55,6 +55,19 @@
 	.pull = GPIOMUX_PULL_NONE,
 };
 
+static struct gpiomux_setting gsbi6_active_cfg = {
+	.func = GPIOMUX_FUNC_GPIO,
+	.drv = GPIOMUX_DRV_2MA,
+	.pull = GPIOMUX_PULL_NONE,
+};
+
+static struct gpiomux_setting gsbi6_suspended_cfg = {
+	.func = GPIOMUX_FUNC_GPIO,
+	.drv = GPIOMUX_DRV_2MA,
+	.pull = GPIOMUX_PULL_DOWN,
+};
+
+
 static struct gpiomux_setting external_vfr[] = {
 	/* Suspended state */
 	{
@@ -436,6 +449,27 @@
 		},
 	},
 	{
+		.gpio      = 27,        /* GSBI6 BT_INT2AP_N for AR3002 */
+		.settings = {
+			[GPIOMUX_SUSPENDED] = &gsbi6_suspended_cfg,
+			[GPIOMUX_ACTIVE]    = &gsbi6_active_cfg,
+		},
+	},
+	{
+		.gpio      = 28,        /* GSBI6 BT_EN for AR3002 */
+		.settings = {
+			[GPIOMUX_SUSPENDED] = &gsbi6_suspended_cfg,
+			[GPIOMUX_ACTIVE]    = &gsbi6_active_cfg,
+		},
+	},
+	{
+		.gpio      = 29,        /* GSBI6 BT_WAKE for AR3002 */
+		.settings = {
+			[GPIOMUX_SUSPENDED] = &gsbi6_suspended_cfg,
+			[GPIOMUX_ACTIVE]    = &gsbi6_active_cfg,
+		},
+	},
+	{
 		.gpio      = 44,	/* GSBI12 I2C QUP SDA */
 		.settings = {
 			[GPIOMUX_SUSPENDED] = &gsbi12,
diff --git a/arch/arm/mach-msm/board-8960.c b/arch/arm/mach-msm/board-8960.c
index dc28b83..50a5ed2 100644
--- a/arch/arm/mach-msm/board-8960.c
+++ b/arch/arm/mach-msm/board-8960.c
@@ -2508,6 +2508,88 @@
 static struct msm_serial_hs_platform_data msm_uart_dm9_pdata;
 #endif
 
+#if defined(CONFIG_BT) && defined(CONFIG_BT_HCIUART_ATH3K)
+static struct resource bluesleep_resources[] = {
+	{
+		.name   = "gpio_host_wake",
+		.start  = 27,
+		.end    = 27,
+		.flags  = IORESOURCE_IO,
+	},
+	{
+		.name   = "gpio_ext_wake",
+		.start  = 29,
+		.end    = 29,
+		.flags  = IORESOURCE_IO,
+	},
+	{
+		.name   = "host_wake",
+		.start  = MSM_GPIO_TO_INT(27),
+		.end    = MSM_GPIO_TO_INT(27),
+		.flags  = IORESOURCE_IRQ,
+	},
+};
+
+static struct platform_device msm_bluesleep_device = {
+	.name		= "bluesleep",
+	.id		= -1,
+	.num_resources	= ARRAY_SIZE(bluesleep_resources),
+	.resource	= bluesleep_resources,
+};
+
+static struct platform_device msm_bt_power_device = {
+	.name = "bt_power",
+};
+
+int gpio_bt_sys_rest_en = 28;
+
+static int bluetooth_power(int on)
+{
+	int rc;
+
+	if (on) {
+		rc = gpio_direction_output(gpio_bt_sys_rest_en, 1);
+		msleep(100);
+	} else {
+		gpio_set_value(gpio_bt_sys_rest_en, 0);
+		rc = gpio_direction_input(gpio_bt_sys_rest_en);
+		msleep(100);
+	}
+	pr_err("%s on= %d rc = %d\n", __func__, on, rc);
+	return 0;
+}
+
+static void __init bt_power_init(void)
+{
+	int rc;
+
+	msm_bt_power_device.dev.platform_data = &bluetooth_power;
+	pr_err("%s enter\n", __func__);
+
+	rc = gpio_request(gpio_bt_sys_rest_en, "bt sys_rst_n");
+	if (rc) {
+		pr_err("%s: unable to request gpio %d (%d)\n",
+			__func__, gpio_bt_sys_rest_en, rc);
+		return;
+	}
+
+	/* When booting up, de-assert BT reset pin */
+	rc = gpio_direction_output(gpio_bt_sys_rest_en, 0);
+	if (rc) {
+		pr_err("%s: Unable to set direction\n", __func__);
+		goto free_gpio;
+	}
+	pr_err("%s done\n", __func__);
+	return;
+
+free_gpio:
+	gpio_free(gpio_bt_sys_rest_en);
+	return;
+}
+#else
+#define bt_power_init(x) do {} while (0)
+#endif
+
 static struct platform_device *common_devices[] __initdata = {
 	&msm8960_device_acpuclk,
 	&msm8960_device_dmov,
@@ -2527,6 +2609,10 @@
 #endif
 	&msm_slim_ctrl,
 	&msm_device_wcnss_wlan,
+#if defined(CONFIG_BT) && defined(CONFIG_BT_HCIUART_ATH3K)
+	&msm_bluesleep_device,
+	&msm_bt_power_device,
+#endif
 #if defined(CONFIG_QSEECOM)
 	&qseecom_device,
 #endif
@@ -3078,6 +3164,7 @@
 	msm8960_init_dsps();
 	change_memory_power = &msm8960_change_memory_power;
 	BUG_ON(msm_pm_boot_init(&msm_pm_boot_pdata));
+	bt_power_init();
 	if (socinfo_get_platform_subtype() == PLATFORM_SUBTYPE_SGLTE) {
 		mdm_sglte_device.dev.platform_data = &sglte_platform_data;
 		platform_device_register(&mdm_sglte_device);
diff --git a/arch/arm/mach-msm/board-8974.c b/arch/arm/mach-msm/board-8974.c
index b939dc2..240e094 100644
--- a/arch/arm/mach-msm/board-8974.c
+++ b/arch/arm/mach-msm/board-8974.c
@@ -20,9 +20,6 @@
 #include <linux/of_address.h>
 #include <linux/of_platform.h>
 #include <linux/of_irq.h>
-#ifdef CONFIG_ION_MSM
-#include <linux/ion.h>
-#endif
 #include <linux/memory.h>
 #ifdef CONFIG_ANDROID_PMEM
 #include <linux/android_pmem.h>
@@ -52,17 +49,6 @@
 #include "lpm_resources.h"
 
 #define MSM_KERNEL_EBI1_MEM_SIZE	0x280000
-#ifdef CONFIG_FB_MSM_HDMI_AS_PRIMARY
-#define MSM_ION_SF_SIZE 0x4000000 /* 64 Mbytes */
-#else
-#define MSM_ION_SF_SIZE 0x2800000 /* 40 Mbytes */
-#endif
-#define MSM_ION_MM_FW_SIZE	0xa00000 /* (10MB) */
-#define MSM_ION_MM_SIZE		0x7800000 /* (120MB) */
-#define MSM_ION_QSECOM_SIZE	0x600000 /* (6MB) */
-#define MSM_ION_MFC_SIZE	SZ_8K
-#define MSM_ION_AUDIO_SIZE	0x2B4000
-#define MSM_ION_HEAP_NUM	8
 
 #ifdef CONFIG_KERNEL_PMEM_EBI_REGION
 static unsigned kernel_ebi1_mem_size = MSM_KERNEL_EBI1_MEM_SIZE;
@@ -90,121 +76,12 @@
 	return MEMTYPE_EBI1;
 }
 
-#ifdef CONFIG_ION_MSM
-static struct ion_cp_heap_pdata cp_mm_ion_pdata = {
-	.permission_type = IPT_TYPE_MM_CARVEOUT,
-	.align = PAGE_SIZE,
-};
-
-static struct ion_cp_heap_pdata cp_mfc_ion_pdata = {
-	.permission_type = IPT_TYPE_MFC_SHAREDMEM,
-	.align = PAGE_SIZE,
-};
-
-static struct ion_co_heap_pdata co_ion_pdata = {
-	.adjacent_mem_id = INVALID_HEAP_ID,
-	.align = PAGE_SIZE,
-};
-
-static struct ion_co_heap_pdata fw_co_ion_pdata = {
-	.adjacent_mem_id = ION_CP_MM_HEAP_ID,
-	.align = SZ_128K,
-};
-
-/**
- * These heaps are listed in the order they will be allocated. Due to
- * video hardware restrictions and content protection the FW heap has to
- * be allocated adjacent (below) the MM heap and the MFC heap has to be
- * allocated after the MM heap to ensure MFC heap is not more than 256MB
- * away from the base address of the FW heap.
- * However, the order of FW heap and MM heap doesn't matter since these
- * two heaps are taken care of by separate code to ensure they are adjacent
- * to each other.
- * Don't swap the order unless you know what you are doing!
- */
-static struct ion_platform_data ion_pdata = {
-	.nr = MSM_ION_HEAP_NUM,
-	.heaps = {
-		{
-			.id	= ION_SYSTEM_HEAP_ID,
-			.type	= ION_HEAP_TYPE_SYSTEM,
-			.name	= ION_VMALLOC_HEAP_NAME,
-		},
-		{
-			.id	= ION_CP_MM_HEAP_ID,
-			.type	= ION_HEAP_TYPE_CP,
-			.name	= ION_MM_HEAP_NAME,
-			.size	= MSM_ION_MM_SIZE,
-			.memory_type = ION_EBI_TYPE,
-			.extra_data = (void *) &cp_mm_ion_pdata,
-		},
-		{
-			.id	= ION_MM_FIRMWARE_HEAP_ID,
-			.type	= ION_HEAP_TYPE_CARVEOUT,
-			.name	= ION_MM_FIRMWARE_HEAP_NAME,
-			.size	= MSM_ION_MM_FW_SIZE,
-			.memory_type = ION_EBI_TYPE,
-			.extra_data = (void *) &fw_co_ion_pdata,
-		},
-		{
-			.id	= ION_CP_MFC_HEAP_ID,
-			.type	= ION_HEAP_TYPE_CP,
-			.name	= ION_MFC_HEAP_NAME,
-			.size	= MSM_ION_MFC_SIZE,
-			.memory_type = ION_EBI_TYPE,
-			.extra_data = (void *) &cp_mfc_ion_pdata,
-		},
-		{
-			.id	= ION_SF_HEAP_ID,
-			.type	= ION_HEAP_TYPE_CARVEOUT,
-			.name	= ION_SF_HEAP_NAME,
-			.size	= MSM_ION_SF_SIZE,
-			.memory_type = ION_EBI_TYPE,
-			.extra_data = (void *) &co_ion_pdata,
-		},
-		{
-			.id	= ION_IOMMU_HEAP_ID,
-			.type	= ION_HEAP_TYPE_IOMMU,
-			.name	= ION_IOMMU_HEAP_NAME,
-		},
-		{
-			.id	= ION_QSECOM_HEAP_ID,
-			.type	= ION_HEAP_TYPE_CARVEOUT,
-			.name	= ION_QSECOM_HEAP_NAME,
-			.size	= MSM_ION_QSECOM_SIZE,
-			.memory_type = ION_EBI_TYPE,
-			.extra_data = (void *) &co_ion_pdata,
-		},
-		{
-			.id	= ION_AUDIO_HEAP_ID,
-			.type	= ION_HEAP_TYPE_CARVEOUT,
-			.name	= ION_AUDIO_HEAP_NAME,
-			.size	= MSM_ION_AUDIO_SIZE,
-			.memory_type = ION_EBI_TYPE,
-			.extra_data = (void *) &co_ion_pdata,
-		},
-	}
-};
-
-static struct platform_device ion_dev = {
-	.name = "ion-msm",
-	.id = 1,
-	.dev = { .platform_data = &ion_pdata },
-};
-
-static void __init reserve_ion_memory(void)
+static void __init reserve_ebi_memory(void)
 {
-	msm_8974_reserve_table[MEMTYPE_EBI1].size += MSM_ION_MM_SIZE;
-	msm_8974_reserve_table[MEMTYPE_EBI1].size += MSM_ION_MM_FW_SIZE;
-	msm_8974_reserve_table[MEMTYPE_EBI1].size += MSM_ION_SF_SIZE;
-	msm_8974_reserve_table[MEMTYPE_EBI1].size += MSM_ION_MFC_SIZE;
-	msm_8974_reserve_table[MEMTYPE_EBI1].size += MSM_ION_QSECOM_SIZE;
-	msm_8974_reserve_table[MEMTYPE_EBI1].size += MSM_ION_AUDIO_SIZE;
 #ifdef CONFIG_KERNEL_PMEM_EBI_REGION
 	msm_8974_reserve_table[MEMTYPE_EBI1].size += kernel_ebi1_mem_size;
 #endif
 }
-#endif
 
 static struct resource smd_resource[] = {
 	{
@@ -370,9 +247,7 @@
 
 static void __init msm_8974_calculate_reserve_sizes(void)
 {
-#ifdef CONFIG_ION_MSM
-	reserve_ion_memory();
-#endif
+	reserve_ebi_memory();
 }
 
 static struct reserve_info msm_8974_reserve_info __initdata = {
@@ -535,9 +410,6 @@
 
 void __init msm_8974_add_devices(void)
 {
-#ifdef CONFIG_ION_MSM
-	platform_device_register(&ion_dev);
-#endif
 	platform_device_register(&msm_device_smd_8974);
 	platform_device_register(&android_usb_device);
 	platform_add_devices(msm_8974_stub_regulator_devices,
diff --git a/arch/arm/mach-msm/clock-8960.c b/arch/arm/mach-msm/clock-8960.c
index 02b28b6..1827773 100644
--- a/arch/arm/mach-msm/clock-8960.c
+++ b/arch/arm/mach-msm/clock-8960.c
@@ -3200,12 +3200,36 @@
 	F_END
 };
 
+static struct branch_clk dsi1_reset_clk = {
+	.b = {
+		.reset_reg = SW_RESET_CORE_REG,
+		.reset_mask = BIT(7),
+		.halt_check = NOCHECK,
+	},
+	.c = {
+		.dbg_name = "dsi1_reset_clk",
+		.ops = &clk_ops_branch,
+		CLK_INIT(dsi1_reset_clk.c),
+	},
+};
+
+static struct branch_clk dsi2_reset_clk = {
+	.b = {
+		.reset_reg = SW_RESET_CORE_REG,
+		.reset_mask = BIT(25),
+		.halt_check = NOCHECK,
+	},
+	.c = {
+		.dbg_name = "dsi2_reset_clk",
+		.ops = &clk_ops_branch,
+		CLK_INIT(dsi2_reset_clk.c),
+	},
+};
+
 static struct rcg_clk dsi1_byte_clk = {
 	.b = {
 		.ctl_reg = DSI1_BYTE_CC_REG,
 		.en_mask = BIT(0),
-		.reset_reg = SW_RESET_CORE_REG,
-		.reset_mask = BIT(7),
 		.halt_reg = DBG_BUS_VEC_B_REG,
 		.halt_bit = 21,
 		.retain_reg = DSI1_BYTE_CC_REG,
@@ -3228,8 +3252,6 @@
 	.b = {
 		.ctl_reg = DSI2_BYTE_CC_REG,
 		.en_mask = BIT(0),
-		.reset_reg = SW_RESET_CORE_REG,
-		.reset_mask = BIT(25),
 		.halt_reg = DBG_BUS_VEC_B_REG,
 		.halt_bit = 20,
 		.retain_reg = DSI2_BYTE_CC_REG,
@@ -3252,7 +3274,6 @@
 	.b = {
 		.ctl_reg = DSI1_ESC_CC_REG,
 		.en_mask = BIT(0),
-		.reset_reg = SW_RESET_CORE_REG,
 		.halt_reg = DBG_BUS_VEC_I_REG,
 		.halt_bit = 1,
 	},
@@ -5435,6 +5456,9 @@
 	CLK_LOOKUP("mem_clk",		ebi1_acpu_a_clk.c, ""),
 	CLK_LOOKUP("bus_clk",		afab_acpu_a_clk.c, ""),
 
+	CLK_LOOKUP("reset1_clk",	dsi1_reset_clk.c, "footswitch-8x60.4"),
+	CLK_LOOKUP("reset2_clk",	dsi2_reset_clk.c, "footswitch-8x60.4"),
+
 	CLK_LOOKUP("l2_mclk",		l2_m_clk,     ""),
 	CLK_LOOKUP("krait0_mclk",	krait0_m_clk, ""),
 	CLK_LOOKUP("krait1_mclk",	krait1_m_clk, ""),
@@ -5752,6 +5776,9 @@
 	CLK_LOOKUP("mem_clk",		ebi1_acpu_a_clk.c, ""),
 	CLK_LOOKUP("bus_clk",		afab_acpu_a_clk.c, ""),
 
+	CLK_LOOKUP("reset1_clk",	dsi1_reset_clk.c, "footswitch-8x60.4"),
+	CLK_LOOKUP("reset2_clk",	dsi2_reset_clk.c, "footswitch-8x60.4"),
+
 	CLK_LOOKUP("l2_mclk",		l2_m_clk,     ""),
 	CLK_LOOKUP("krait0_mclk",	krait0_m_clk, ""),
 	CLK_LOOKUP("krait1_mclk",	krait1_m_clk, ""),
@@ -6077,6 +6104,8 @@
 	CLK_LOOKUP("mem_clk",		ebi1_acpu_a_clk.c, ""),
 	CLK_LOOKUP("bus_clk",		afab_acpu_a_clk.c, ""),
 
+	CLK_LOOKUP("reset1_clk",	dsi1_reset_clk.c, "footswitch-8x60.4"),
+
 	CLK_LOOKUP("l2_mclk",		l2_m_clk,     ""),
 	CLK_LOOKUP("krait0_mclk",	krait0_m_clk, ""),
 	CLK_LOOKUP("krait1_mclk",	krait1_m_clk, ""),
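
Note on the clock-8960.c hunks above: the DSI software resets move out of the byte/esc branch clocks into dedicated dsi1/dsi2_reset_clk branches, looked up by the MDP footswitch ("footswitch-8x60.4") as "reset1_clk"/"reset2_clk" (the devices-8064/8930/8960 clock name tables below list them). A minimal sketch of pulsing such a reset follows, assuming the mach-msm clk_reset() API from <mach/clk.h>; the helper name is hypothetical.

#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/err.h>
#include <mach/clk.h>

/* Hypothetical helper: assert and de-assert the DSI1 core reset via the
 * new "reset1_clk" lookup entry. */
static void dsi1_pulse_reset(struct device *dev)
{
	struct clk *rst = clk_get(dev, "reset1_clk");

	if (IS_ERR(rst))
		return;

	clk_reset(rst, CLK_RESET_ASSERT);
	udelay(1);
	clk_reset(rst, CLK_RESET_DEASSERT);
	clk_put(rst);
}
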
diff --git a/arch/arm/mach-msm/clock-8974.c b/arch/arm/mach-msm/clock-8974.c
index 59d53bb..7dd3829 100644
--- a/arch/arm/mach-msm/clock-8974.c
+++ b/arch/arm/mach-msm/clock-8974.c
@@ -4356,72 +4356,76 @@
 };
 
 struct measure_mux_entry measure_mux[] = {
-	{&gcc_bam_dma_ahb_clk.c,		GCC_BASE, 0x00e8},
-	{&gcc_blsp1_ahb_clk.c,			GCC_BASE, 0x0090},
-	{&gcc_blsp1_qup1_i2c_apps_clk.c,	GCC_BASE, 0x0093},
-	{&gcc_blsp1_qup1_spi_apps_clk.c,	GCC_BASE, 0x0092},
-	{&gcc_blsp1_qup2_i2c_apps_clk.c,	GCC_BASE, 0x0098},
-	{&gcc_blsp1_qup2_spi_apps_clk.c,	GCC_BASE, 0x0096},
-	{&gcc_blsp1_qup3_i2c_apps_clk.c,	GCC_BASE, 0x009c},
-	{&gcc_blsp1_qup3_spi_apps_clk.c,	GCC_BASE, 0x009b},
-	{&gcc_blsp1_qup4_i2c_apps_clk.c,	GCC_BASE, 0x00a1},
-	{&gcc_blsp1_qup4_spi_apps_clk.c,	GCC_BASE, 0x00a0},
-	{&gcc_blsp1_qup5_i2c_apps_clk.c,	GCC_BASE, 0x00a5},
-	{&gcc_blsp1_qup5_spi_apps_clk.c,	GCC_BASE, 0x00a4},
-	{&gcc_blsp1_qup6_i2c_apps_clk.c,	GCC_BASE, 0x00aa},
-	{&gcc_blsp1_qup6_spi_apps_clk.c,	GCC_BASE, 0x00a9},
-	{&gcc_blsp1_uart1_apps_clk.c,		GCC_BASE, 0x0094},
-	{&gcc_blsp1_uart2_apps_clk.c,		GCC_BASE, 0x0099},
-	{&gcc_blsp1_uart3_apps_clk.c,		GCC_BASE, 0x009d},
-	{&gcc_blsp1_uart4_apps_clk.c,		GCC_BASE, 0x00a2},
-	{&gcc_blsp1_uart5_apps_clk.c,		GCC_BASE, 0x00a6},
-	{&gcc_blsp1_uart6_apps_clk.c,		GCC_BASE, 0x00ab},
-	{&gcc_blsp2_ahb_clk.c,			GCC_BASE, 0x00b0},
-	{&gcc_blsp2_qup1_i2c_apps_clk.c,	GCC_BASE, 0x00b3},
-	{&gcc_blsp2_qup1_spi_apps_clk.c,	GCC_BASE, 0x00b2},
-	{&gcc_blsp2_qup2_i2c_apps_clk.c,	GCC_BASE, 0x00b8},
-	{&gcc_blsp2_qup2_spi_apps_clk.c,	GCC_BASE, 0x00b6},
-	{&gcc_blsp2_qup3_i2c_apps_clk.c,	GCC_BASE, 0x00bc},
-	{&gcc_blsp2_qup3_spi_apps_clk.c,	GCC_BASE, 0x00bb},
-	{&gcc_blsp2_qup4_i2c_apps_clk.c,	GCC_BASE, 0x00c1},
-	{&gcc_blsp2_qup4_spi_apps_clk.c,	GCC_BASE, 0x00c0},
-	{&gcc_blsp2_qup5_i2c_apps_clk.c,	GCC_BASE, 0x00c5},
-	{&gcc_blsp2_qup5_spi_apps_clk.c,	GCC_BASE, 0x00c4},
-	{&gcc_blsp2_qup6_i2c_apps_clk.c,	GCC_BASE, 0x00ca},
-	{&gcc_blsp2_qup6_spi_apps_clk.c,	GCC_BASE, 0x00c9},
-	{&gcc_blsp2_uart1_apps_clk.c,		GCC_BASE, 0x00b4},
-	{&gcc_blsp2_uart2_apps_clk.c,		GCC_BASE, 0x00b9},
-	{&gcc_blsp2_uart3_apps_clk.c,		GCC_BASE, 0x00bd},
-	{&gcc_blsp2_uart4_apps_clk.c,		GCC_BASE, 0x00c2},
-	{&gcc_blsp2_uart5_apps_clk.c,		GCC_BASE, 0x00c6},
-	{&gcc_blsp2_uart6_apps_clk.c,		GCC_BASE, 0x00cb},
-	{&gcc_boot_rom_ahb_clk.c,		GCC_BASE, 0x0100},
-	{&gcc_ocmem_noc_cfg_ahb_clk.c,		GCC_BASE, 0x0029},
-	{&gcc_mmss_noc_cfg_ahb_clk.c,		GCC_BASE, 0x002A},
-	{&gcc_mss_cfg_ahb_clk.c,		GCC_BASE, 0x0030},
-	{&gcc_ce1_clk.c,			GCC_BASE, 0x0140},
-	{&gcc_ce2_clk.c,			GCC_BASE, 0x0148},
-	{&gcc_pdm2_clk.c,			GCC_BASE, 0x00da},
-	{&gcc_pdm_ahb_clk.c,			GCC_BASE, 0x00d8},
-	{&gcc_prng_ahb_clk.c,			GCC_BASE, 0x00e0},
-	{&gcc_sdcc1_ahb_clk.c,			GCC_BASE, 0x0071},
-	{&gcc_sdcc1_apps_clk.c,			GCC_BASE, 0x0070},
-	{&gcc_sdcc2_ahb_clk.c,			GCC_BASE, 0x0079},
-	{&gcc_sdcc2_apps_clk.c,			GCC_BASE, 0x0078},
-	{&gcc_sdcc3_ahb_clk.c,			GCC_BASE, 0x0081},
-	{&gcc_sdcc3_apps_clk.c,			GCC_BASE, 0x0080},
-	{&gcc_sdcc4_ahb_clk.c,			GCC_BASE, 0x0089},
-	{&gcc_sdcc4_apps_clk.c,			GCC_BASE, 0x0088},
-	{&gcc_tsif_ahb_clk.c,			GCC_BASE, 0x00f0},
-	{&gcc_tsif_ref_clk.c,			GCC_BASE, 0x00f1},
+	{&gcc_pdm_ahb_clk.c,			GCC_BASE, 0x00d0},
+	{&gcc_blsp2_qup1_i2c_apps_clk.c,	GCC_BASE, 0x00ab},
+	{&gcc_blsp2_qup3_spi_apps_clk.c,	GCC_BASE, 0x00b3},
+	{&gcc_blsp2_uart5_apps_clk.c,		GCC_BASE, 0x00be},
 	{&gcc_usb30_master_clk.c,		GCC_BASE, 0x0050},
+	{&gcc_blsp2_qup3_i2c_apps_clk.c,	GCC_BASE, 0x00b4},
+	{&gcc_usb_hsic_system_clk.c,		GCC_BASE, 0x0059},
+	{&gcc_blsp2_uart3_apps_clk.c,		GCC_BASE, 0x00b5},
+	{&gcc_usb_hsic_io_cal_clk.c,		GCC_BASE, 0x005b},
+	{&gcc_ce2_axi_clk.c,			GCC_BASE, 0x0141},
+	{&gcc_sdcc3_ahb_clk.c,			GCC_BASE, 0x0079},
+	{&gcc_blsp1_qup5_i2c_apps_clk.c,	GCC_BASE, 0x009d},
+	{&gcc_blsp1_qup1_spi_apps_clk.c,	GCC_BASE, 0x008a},
+	{&gcc_blsp2_uart4_apps_clk.c,		GCC_BASE, 0x00ba},
+	{&gcc_ce2_clk.c,			GCC_BASE, 0x0140},
+	{&gcc_blsp1_uart2_apps_clk.c,		GCC_BASE, 0x0091},
+	{&gcc_sdcc1_ahb_clk.c,			GCC_BASE, 0x0069},
+	{&gcc_mss_cfg_ahb_clk.c,		GCC_BASE, 0x0030},
+	{&gcc_tsif_ahb_clk.c,			GCC_BASE, 0x00e8},
+	{&gcc_sdcc4_ahb_clk.c,			GCC_BASE, 0x0081},
+	{&gcc_blsp1_qup4_spi_apps_clk.c,	GCC_BASE, 0x0098},
+	{&gcc_blsp2_qup4_spi_apps_clk.c,	GCC_BASE, 0x00b8},
+	{&gcc_blsp1_qup3_spi_apps_clk.c,	GCC_BASE, 0x0093},
+	{&gcc_blsp1_qup6_i2c_apps_clk.c,	GCC_BASE, 0x00a2},
+	{&gcc_blsp2_qup6_i2c_apps_clk.c,	GCC_BASE, 0x00c2},
+	{&gcc_bam_dma_ahb_clk.c,		GCC_BASE, 0x00e0},
+	{&gcc_sdcc3_apps_clk.c,			GCC_BASE, 0x0078},
+	{&gcc_usb_hs_system_clk.c,		GCC_BASE, 0x0060},
+	{&gcc_blsp1_ahb_clk.c,			GCC_BASE, 0x0088},
+	{&gcc_sdcc1_apps_clk.c,			GCC_BASE, 0x0068},
+	{&gcc_blsp2_qup5_i2c_apps_clk.c,	GCC_BASE, 0x00bd},
+	{&gcc_blsp1_uart4_apps_clk.c,		GCC_BASE, 0x009a},
+	{&gcc_blsp2_qup2_spi_apps_clk.c,	GCC_BASE, 0x00ae},
+	{&gcc_blsp2_qup6_spi_apps_clk.c,	GCC_BASE, 0x00c1},
+	{&gcc_blsp2_uart2_apps_clk.c,		GCC_BASE, 0x00b1},
+	{&gcc_blsp1_qup2_spi_apps_clk.c,	GCC_BASE, 0x008e},
+	{&gcc_usb_hsic_ahb_clk.c,		GCC_BASE, 0x0058},
+	{&gcc_blsp1_uart3_apps_clk.c,		GCC_BASE, 0x0095},
 	{&gcc_usb30_mock_utmi_clk.c,		GCC_BASE, 0x0052},
-	{&gcc_usb_hs_ahb_clk.c,			GCC_BASE, 0x0069},
-	{&gcc_usb_hs_system_clk.c,		GCC_BASE, 0x0068},
-	{&gcc_usb_hsic_ahb_clk.c,		GCC_BASE, 0x0060},
-	{&gcc_usb_hsic_clk.c,			GCC_BASE, 0x0062},
-	{&gcc_usb_hsic_io_cal_clk.c,		GCC_BASE, 0x0063},
-	{&gcc_usb_hsic_system_clk.c,		GCC_BASE, 0x0061},
+	{&gcc_ce1_axi_clk.c,			GCC_BASE, 0x0139},
+	{&gcc_sdcc4_apps_clk.c,			GCC_BASE, 0x0080},
+	{&gcc_blsp1_qup5_spi_apps_clk.c,	GCC_BASE, 0x009c},
+	{&gcc_usb_hs_ahb_clk.c,			GCC_BASE, 0x0061},
+	{&gcc_blsp1_qup6_spi_apps_clk.c,	GCC_BASE, 0x00a1},
+	{&gcc_blsp2_qup2_i2c_apps_clk.c,	GCC_BASE, 0x00b0},
+	{&gcc_prng_ahb_clk.c,			GCC_BASE, 0x00d8},
+	{&gcc_blsp1_qup3_i2c_apps_clk.c,	GCC_BASE, 0x0094},
+	{&gcc_usb_hsic_clk.c,			GCC_BASE, 0x005a},
+	{&gcc_blsp1_uart6_apps_clk.c,		GCC_BASE, 0x00a3},
+	{&gcc_sdcc2_apps_clk.c,			GCC_BASE, 0x0070},
+	{&gcc_tsif_ref_clk.c,			GCC_BASE, 0x00e9},
+	{&gcc_blsp1_uart1_apps_clk.c,		GCC_BASE, 0x008c},
+	{&gcc_blsp2_qup5_spi_apps_clk.c,	GCC_BASE, 0x00bc},
+	{&gcc_blsp1_qup4_i2c_apps_clk.c,	GCC_BASE, 0x0099},
+	{&gcc_mmss_noc_cfg_ahb_clk.c,		GCC_BASE, 0x002a},
+	{&gcc_blsp2_ahb_clk.c,			GCC_BASE, 0x00a8},
+	{&gcc_boot_rom_ahb_clk.c,		GCC_BASE, 0x00f8},
+	{&gcc_ce1_ahb_clk.c,			GCC_BASE, 0x013a},
+	{&gcc_pdm2_clk.c,			GCC_BASE, 0x00d2},
+	{&gcc_blsp2_qup4_i2c_apps_clk.c,	GCC_BASE, 0x00b9},
+	{&gcc_ce2_ahb_clk.c,			GCC_BASE, 0x0142},
+	{&gcc_blsp1_uart5_apps_clk.c,		GCC_BASE, 0x009e},
+	{&gcc_blsp2_qup1_spi_apps_clk.c,	GCC_BASE, 0x00aa},
+	{&gcc_blsp1_qup2_i2c_apps_clk.c,	GCC_BASE, 0x0090},
+	{&gcc_blsp2_uart1_apps_clk.c,		GCC_BASE, 0x00ac},
+	{&gcc_blsp1_qup1_i2c_apps_clk.c,	GCC_BASE, 0x008b},
+	{&gcc_blsp2_uart6_apps_clk.c,		GCC_BASE, 0x00c3},
+	{&gcc_sdcc2_ahb_clk.c,			GCC_BASE, 0x0071},
+	{&gcc_ocmem_noc_cfg_ahb_clk.c,		GCC_BASE, 0x0029},
+	{&gcc_ce1_clk.c,			GCC_BASE, 0x0138},
 	{&mmss_mmssnoc_ahb_clk.c,		MMSS_BASE, 0x0001},
 	{&mmss_mmssnoc_axi_clk.c,		MMSS_BASE, 0x0004},
 	{&ocmemnoc_clk.c,			MMSS_BASE, 0x0007},
@@ -4956,14 +4960,19 @@
 	CLK_LOOKUP("osr_clk", audio_core_lpaif_quad_osr_clk.c, ""),
 	CLK_LOOKUP("ebit_clk", audio_core_lpaif_quad_ebit_clk.c, ""),
 	CLK_LOOKUP("ibit_clk", audio_core_lpaif_quad_ibit_clk.c, ""),
-	CLK_LOOKUP("core_clk", audio_core_lpaif_pcm0_clk_src.c, ""),
+	CLK_LOOKUP("core_clk", audio_core_lpaif_pcm0_clk_src.c,
+						"msm-dai-q6.4106"),
 	CLK_LOOKUP("ebit_clk", audio_core_lpaif_pcm0_ebit_clk.c, ""),
+	CLK_LOOKUP("ibit_clk", audio_core_lpaif_pcm0_ibit_clk.c,
+						"msm-dai-q6.4106"),
 	CLK_LOOKUP("ibit_clk", audio_core_lpaif_pcm0_ibit_clk.c, ""),
 	CLK_LOOKUP("core_clk", audio_core_lpaif_pcm1_clk_src.c, ""),
 	CLK_LOOKUP("ebit_clk", audio_core_lpaif_pcm1_ebit_clk.c, ""),
 	CLK_LOOKUP("ibit_clk", audio_core_lpaif_pcm1_ibit_clk.c, ""),
-	CLK_LOOKUP("core_clk_src", audio_core_lpaif_pcmoe_clk_src.c, ""),
-	CLK_LOOKUP("core_clk", audio_core_lpaif_pcmoe_clk.c, ""),
+	CLK_LOOKUP("core_oe_src_clk", audio_core_lpaif_pcmoe_clk_src.c,
+						"msm-dai-q6.4106"),
+	CLK_LOOKUP("core_oe_clk", audio_core_lpaif_pcmoe_clk.c,
+						"msm-dai-q6.4106"),
 
 	CLK_LOOKUP("core_clk",       mss_xo_q6_clk.c, "pil-q6v5-mss"),
 	CLK_LOOKUP("bus_clk",       mss_bus_q6_clk.c, "pil-q6v5-mss"),
diff --git a/arch/arm/mach-msm/devices-8064.c b/arch/arm/mach-msm/devices-8064.c
index ac26acf..1f0bd2c 100644
--- a/arch/arm/mach-msm/devices-8064.c
+++ b/arch/arm/mach-msm/devices-8064.c
@@ -1837,6 +1837,8 @@
 		{ .name = "lut_clk" },
 		{ .name = "tv_src_clk" },
 		{ .name = "tv_clk" },
+		{ .name = "reset1_clk" },
+		{ .name = "reset2_clk" },
 		{ 0 }
 	},
 	.bus_port0 = MSM_BUS_MASTER_MDP_PORT0,
diff --git a/arch/arm/mach-msm/devices-8930.c b/arch/arm/mach-msm/devices-8930.c
index 7cb6e95..fa24ba9 100644
--- a/arch/arm/mach-msm/devices-8930.c
+++ b/arch/arm/mach-msm/devices-8930.c
@@ -425,6 +425,7 @@
 		{ .name = "lut_clk" },
 		{ .name = "tv_src_clk" },
 		{ .name = "tv_clk" },
+		{ .name = "reset1_clk" },
 		{ 0 }
 	},
 	.bus_port0 = MSM_BUS_MASTER_MDP_PORT0,
diff --git a/arch/arm/mach-msm/devices-8960.c b/arch/arm/mach-msm/devices-8960.c
index 3d1926c..724eed8 100644
--- a/arch/arm/mach-msm/devices-8960.c
+++ b/arch/arm/mach-msm/devices-8960.c
@@ -2151,6 +2151,8 @@
 		{ .name = "lut_clk" },
 		{ .name = "tv_src_clk" },
 		{ .name = "tv_clk" },
+		{ .name = "reset1_clk" },
+		{ .name = "reset2_clk" },
 		{ 0 }
 	},
 	.bus_port0 = MSM_BUS_MASTER_MDP_PORT0,
diff --git a/arch/arm/mach-msm/devices-msm7x27a.c b/arch/arm/mach-msm/devices-msm7x27a.c
index 8fef953..2642864 100644
--- a/arch/arm/mach-msm/devices-msm7x27a.c
+++ b/arch/arm/mach-msm/devices-msm7x27a.c
@@ -1661,18 +1661,23 @@
 	/* Part number for 1GHz part */
 	case 0x770:
 	case 0x771:
+	case 0x77C:
 	case 0x780:
+	case 0x8D0:
 		cpu = MSM8625;
 		break;
 	/* Part number for 1.2GHz part */
 	case 0x773:
 	case 0x774:
 	case 0x781:
+	case 0x8D1:
 		cpu = MSM8625A;
 		break;
 	case 0x775:
 	case 0x776:
+	case 0x77D:
 	case 0x782:
+	case 0x8D2:
 		cpu = MSM8625AB;
 		break;
 	default:
diff --git a/arch/arm/mach-msm/devices-msm7x30.c b/arch/arm/mach-msm/devices-msm7x30.c
index 8a5d0e8..a6473c6 100644
--- a/arch/arm/mach-msm/devices-msm7x30.c
+++ b/arch/arm/mach-msm/devices-msm7x30.c
@@ -26,7 +26,7 @@
 #include <mach/dma.h>
 #include <mach/board.h>
 #include <asm/clkdev.h>
-
+#include <linux/ion.h>
 #include "devices.h"
 #include "footswitch.h"
 
@@ -963,8 +963,8 @@
 };
 
 struct msm_vidc_platform_data vidc_platform_data = {
-	.memtype = MEMTYPE_EBI0,
-	.enable_ion = 0,
+	.memtype = ION_CAMERA_HEAP_ID,
+	.enable_ion = 1,
 	.disable_dmx = 0,
 	.cont_mode_dpb_count = 8
 };
diff --git a/arch/arm/mach-msm/include/mach/cpuidle.h b/arch/arm/mach-msm/include/mach/cpuidle.h
index 2a5aa97..8566e7f 100644
--- a/arch/arm/mach-msm/include/mach/cpuidle.h
+++ b/arch/arm/mach-msm/include/mach/cpuidle.h
@@ -25,12 +25,16 @@
 	enum msm_pm_sleep_mode mode_nr;
 };
 
-#ifdef CONFIG_CPU_IDLE
+#ifdef CONFIG_PM
 s32 msm_cpuidle_get_deep_idle_latency(void);
+#else
+static inline s32 msm_cpuidle_get_deep_idle_latency(void) { return 0; }
+#endif
+
+#ifdef CONFIG_CPU_IDLE
 int msm_cpuidle_init(void);
 #else
 static inline int msm_cpuidle_init(void) { return -ENOSYS; }
-static inline s32 msm_cpuidle_get_deep_idle_latency(void) { return 0; }
 #endif
 
 #ifdef CONFIG_MSM_SLEEP_STATS
diff --git a/arch/arm/mach-msm/include/mach/mdm2.h b/arch/arm/mach-msm/include/mach/mdm2.h
index c4877cc..6ec12c1 100644
--- a/arch/arm/mach-msm/include/mach/mdm2.h
+++ b/arch/arm/mach-msm/include/mach/mdm2.h
@@ -32,6 +32,7 @@
 	struct platform_device *peripheral_platform_device;
 	const unsigned int ramdump_timeout_ms;
 	int image_upgrade_supported;
+	struct gpiomux_setting *mdm2ap_status_gpio_run_cfg;
 };
 
 #endif
diff --git a/arch/arm/mach-msm/include/mach/memory.h b/arch/arm/mach-msm/include/mach/memory.h
index bf92f7d..6b7ad9a 100644
--- a/arch/arm/mach-msm/include/mach/memory.h
+++ b/arch/arm/mach-msm/include/mach/memory.h
@@ -78,6 +78,7 @@
 int platform_physical_remove_pages(u64, u64);
 int platform_physical_active_pages(u64, u64);
 int platform_physical_low_power_pages(u64, u64);
+int msm_get_memory_type_from_name(const char *memtype_name);
 
 extern int (*change_memory_power)(u64, u64, int);
 
diff --git a/arch/arm/mach-msm/include/mach/msm_hsusb.h b/arch/arm/mach-msm/include/mach/msm_hsusb.h
index 4f140cc..4e22b0f 100644
--- a/arch/arm/mach-msm/include/mach/msm_hsusb.h
+++ b/arch/arm/mach-msm/include/mach/msm_hsusb.h
@@ -202,8 +202,4 @@
 	struct clk *ebi1_clk;
 };
 
-int msm_ep_config(struct usb_ep *ep);
-int msm_ep_unconfig(struct usb_ep *ep);
-int msm_data_fifo_config(struct usb_ep *ep, u32 addr, u32 size);
-
 #endif
diff --git a/arch/arm/mach-msm/include/mach/ocmem.h b/arch/arm/mach-msm/include/mach/ocmem.h
index 415f8ed..904de5e 100644
--- a/arch/arm/mach-msm/include/mach/ocmem.h
+++ b/arch/arm/mach-msm/include/mach/ocmem.h
@@ -47,6 +47,24 @@
 	struct ocmem_chunk chunks[OCMEM_MAX_CHUNKS];
 };
 
+enum ocmem_power_state {
+	OCMEM_OFF = 0x0,
+	OCMEM_RETENTION,
+	OCMEM_ON,
+	OCMEM_MAX = OCMEM_ON,
+};
+
+struct ocmem_resource {
+	unsigned resource_id;
+	unsigned num_keys;
+	unsigned int *keys;
+};
+
+struct ocmem_vectors {
+	unsigned num_resources;
+	struct ocmem_resource *r;
+};
+
 /* List of clients that allocate/interact with OCMEM */
 /* Must be in sync with client_names */
 enum ocmem_client {
@@ -120,4 +138,14 @@
 int ocmem_evict(int client_id);
 
 int ocmem_restore(int client_id);
+
+/* Power Control APIs */
+int ocmem_set_power_state(int client_id, struct ocmem_buf *buf,
+				enum ocmem_power_state new_state);
+
+enum ocmem_power_state ocmem_get_power_state(int client_id,
+				struct ocmem_buf *buf);
+
+struct ocmem_vectors *ocmem_get_vectors(int client_id,
+						struct ocmem_buf *buf);
 #endif
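
Note on the ocmem.h hunk above: it adds client-facing power-control hooks next to the existing allocation API. The ocmem_api.c hunk later in this patch still stubs them out, so the sketch below only illustrates the intended calling convention; OCMEM_SENSORS is one of the IDs from enum ocmem_client (it also appears in the ocmem_rdm.c hunk further down).

#include <linux/printk.h>
#include <mach/ocmem.h>

/* Illustrative only: vote a buffer's backing memory into retention.
 * The wrappers in ocmem_api.c are still stubs in this patch. */
static void example_retention_vote(struct ocmem_buf *buf)
{
	if (ocmem_set_power_state(OCMEM_SENSORS, buf, OCMEM_RETENTION))
		pr_err("ocmem: power state vote failed\n");
}
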
diff --git a/arch/arm/mach-msm/include/mach/ocmem_priv.h b/arch/arm/mach-msm/include/mach/ocmem_priv.h
index 70b5a45..49e283d 100644
--- a/arch/arm/mach-msm/include/mach/ocmem_priv.h
+++ b/arch/arm/mach-msm/include/mach/ocmem_priv.h
@@ -16,11 +16,11 @@
 /** All interfaces in this header should only be used by OCMEM driver
  *  Client drivers should use wrappers available in ocmem.h
  **/
-
-#include "ocmem.h"
-#include <mach/msm_iomap.h>
-#include <asm/io.h>
 #include <linux/platform_device.h>
+#include <asm/io.h>
+#include <mach/msm_iomap.h>
+#include "ocmem.h"
+
 
 #define OCMEM_PHYS_BASE 0xFEC00000
 #define OCMEM_PHYS_SIZE 0x180000
@@ -62,6 +62,13 @@
 	SCHED_DUMP,
 };
 
+/* Operational modes of each region */
+enum region_mode {
+	WIDE_MODE = 0x0,
+	THIN_MODE,
+	MODE_DEFAULT = WIDE_MODE,
+};
+
 struct ocmem_plat_data {
 	void __iomem *vbase;
 	unsigned long size;
@@ -77,6 +84,8 @@
 	int ocmem_irq;
 	int dm_irq;
 	bool interleaved;
+	bool rpm_pwr_ctrl;
+	unsigned rpm_rsc_type;
 };
 
 struct ocmem_eviction_data {
@@ -113,6 +122,8 @@
 	unsigned long req_start;
 	unsigned long req_end;
 	unsigned long req_sz;
+	/* Request Power State */
+	unsigned power_state;
 	struct ocmem_eviction_data *edata;
 };
 
@@ -154,6 +165,18 @@
 		return NULL;
 }
 
+/* Simple wrappers which will have debug features added later */
+static inline int ocmem_read(void *at)
+{
+	return readl_relaxed(at);
+}
+
+static inline int ocmem_write(unsigned long val, void *at)
+{
+	writel_relaxed(val, at);
+	return 0;
+}
+
 struct ocmem_zone *get_zone(unsigned);
 unsigned long offset_to_phys(unsigned long);
 unsigned long phys_to_offset(unsigned long);
@@ -170,6 +193,7 @@
 
 int ocmem_sched_init(void);
 int ocmem_rdm_init(struct platform_device *);
+int ocmem_core_init(struct platform_device *);
 int process_allocate(int, struct ocmem_handle *, unsigned long, unsigned long,
 			unsigned long, bool, bool);
 int process_free(int, struct ocmem_handle *);
@@ -180,4 +204,6 @@
 int ocmem_rdm_transfer(int, struct ocmem_map_list *,
 				unsigned long, int);
 unsigned long process_quota(int);
+int ocmem_memory_off(int, unsigned long, unsigned long);
+int ocmem_memory_on(int, unsigned long, unsigned long);
 #endif
diff --git a/arch/arm/mach-msm/include/mach/usb_bam.h b/arch/arm/mach-msm/include/mach/usb_bam.h
index ec135a3..47313a7 100644
--- a/arch/arm/mach-msm/include/mach/usb_bam.h
+++ b/arch/arm/mach-msm/include/mach/usb_bam.h
@@ -12,6 +12,7 @@
 
 #ifndef _USB_BAM_H_
 #define _USB_BAM_H_
+#include "sps.h"
 
 /**
  * SPS Pipes direction.
@@ -43,7 +44,7 @@
  * @return 0 on success, negative value on error
  *
  */
-int usb_bam_connect(u8 idx, u8 *src_pipe_idx, u8 *dst_pipe_idx);
+int usb_bam_connect(u8 idx, u32 *src_pipe_idx, u32 *dst_pipe_idx);
 
 /**
  * Register a wakeup callback from peer BAM.
@@ -57,8 +58,40 @@
  */
 int usb_bam_register_wake_cb(u8 idx,
 	 int (*callback)(void *), void* param);
+
+/**
+ * Disconnect USB-to-Peripheral SPS connection.
+ *
+ * @idx - Connection index.
+ *
+ * @return 0 on success, negative value on error
+ */
+int usb_bam_disconnect_pipe(u8 idx);
+
+/**
+ * Returns usb bam connection parameters.
+ *
+ * @conn_idx - Connection index.
+ *
+ * @pipe_dir - USB pipe direction to/from the peripheral.
+ *
+ * @usb_bam_handle - Usb bam handle.
+ *
+ * @usb_bam_pipe_idx - Usb bam pipe index.
+ *
+ * @peer_pipe_idx - Peer pipe index.
+ *
+ * @desc_fifo - Descriptor fifo parameters.
+ *
+ * @data_fifo - Data fifo parameters.
+ *
+ */
+void get_bam2bam_connection_info(u8 conn_idx, enum usb_bam_pipe_dir pipe_dir,
+	u32 *usb_bam_handle, u32 *usb_bam_pipe_idx, u32 *peer_pipe_idx,
+	struct sps_mem_buffer *desc_fifo, struct sps_mem_buffer *data_fifo);
+
 #else
-static inline int usb_bam_connect(u8 idx, u8 *src_pipe_idx, u8 *dst_pipe_idx)
+static inline int usb_bam_connect(u8 idx, u32 *src_pipe_idx, u32 *dst_pipe_idx)
 {
 	return -ENODEV;
 }
@@ -68,6 +101,18 @@
 {
 	return -ENODEV;
 }
+
+static inline int usb_bam_disconnect_pipe(u8 idx)
+{
+	return -ENODEV;
+}
+
+static inline void get_bam2bam_connection_info(u8 conn_idx,
+	enum usb_bam_pipe_dir pipe_dir, u32 *usb_bam_handle,
+	u32 *usb_bam_pipe_idx, u32 *peer_pipe_idx,
+	struct sps_mem_buffer *desc_fifo, struct sps_mem_buffer *data_fifo)
+{
+	return;
+}
 #endif
 #endif				/* _USB_BAM_H_ */
-
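
Note on the usb_bam.h hunk above: the pipe indices widen to u32 and get_bam2bam_connection_info() exposes the SPS connection details after pipe setup. A minimal gadget-side sketch follows; the direction value USB_TO_PEER_PERIPHERAL is assumed (the enum body is outside this hunk) and connection index 0 is illustrative only.

#include <linux/types.h>
#include <mach/usb_bam.h>

/* Illustrative caller; index 0 and USB_TO_PEER_PERIPHERAL are assumed. */
static int example_bam2bam_setup(void)
{
	u32 src_pipe, dst_pipe;
	u32 bam_handle, bam_pipe, peer_pipe;
	struct sps_mem_buffer desc_fifo, data_fifo;
	int ret;

	ret = usb_bam_connect(0, &src_pipe, &dst_pipe);
	if (ret)
		return ret;

	get_bam2bam_connection_info(0, USB_TO_PEER_PERIPHERAL,
			&bam_handle, &bam_pipe, &peer_pipe,
			&desc_fifo, &data_fifo);
	return 0;
}
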
diff --git a/arch/arm/mach-msm/mdm2.c b/arch/arm/mach-msm/mdm2.c
index e74af2e..07f3efc 100644
--- a/arch/arm/mach-msm/mdm2.c
+++ b/arch/arm/mach-msm/mdm2.c
@@ -44,7 +44,7 @@
 
 #define MDM_PBLRDY_CNT		20
 
-static int mdm_debug_on;
+static int mdm_debug_mask;
 static int power_on_count;
 static int hsic_peripheral_status;
 static DEFINE_MUTEX(hsic_status_lock);
@@ -234,7 +234,7 @@
 
 static void debug_state_changed(int value)
 {
-	mdm_debug_on = value;
+	mdm_debug_mask = value;
 }
 
 static void mdm_status_changed(struct mdm_modem_drv *mdm_drv, int value)
diff --git a/arch/arm/mach-msm/mdm_common.c b/arch/arm/mach-msm/mdm_common.c
index 6b40cda..6ca9045 100644
--- a/arch/arm/mach-msm/mdm_common.c
+++ b/arch/arm/mach-msm/mdm_common.c
@@ -30,6 +30,7 @@
 #include <linux/workqueue.h>
 #include <linux/clk.h>
 #include <linux/mfd/pmic8058.h>
+#include <linux/msm_charm.h>
 #include <asm/mach-types.h>
 #include <asm/uaccess.h>
 #include <mach/mdm2.h>
@@ -37,7 +38,7 @@
 #include <mach/subsystem_notif.h>
 #include <mach/subsystem_restart.h>
 #include <mach/rpm.h>
-#include <linux/msm_charm.h>
+#include <mach/gpiomux.h>
 #include "msm_watchdog.h"
 #include "mdm_private.h"
 #include "sysmon.h"
@@ -48,10 +49,11 @@
 #define MDM_RDUMP_TIMEOUT	120000L
 #define MDM2AP_STATUS_TIMEOUT_MS 60000L
 
-static int mdm_debug_on;
+static unsigned int mdm_debug_mask;
 static struct workqueue_struct *mdm_queue;
 static struct workqueue_struct *mdm_sfr_queue;
 static unsigned int dump_timeout_ms;
+static int vddmin_gpios_sent;
 
 #define EXTERNAL_MODEM "external_modem"
 
@@ -68,6 +70,13 @@
 #define SFR_MAX_RETRIES		10
 #define SFR_RETRY_INTERVAL	1000
 
+enum gpio_update_config {
+	GPIO_UPDATE_BOOTING_CONFIG = 1,
+	GPIO_UPDATE_RUNNING_CONFIG,
+};
+static int mdm2ap_status_valid_old_config;
+static struct gpiomux_setting mdm2ap_status_old_config;
+
 static irqreturn_t mdm_vddmin_change(int irq, void *dev_id)
 {
 	int value = gpio_get_value(
@@ -92,6 +101,7 @@
 	if (!vddmin_res)
 		return;
 
+	pr_info("Enabling vddmin logging\n");
 	req.id = vddmin_res->rpm_id;
 	req.value = ((uint32_t)vddmin_res->ap2mdm_vddmin_gpio & 0x0000FFFF)
 							<< 16;
@@ -100,7 +110,7 @@
 
 	msm_rpm_set(MSM_RPM_CTX_SET_0, &req, 1);
 
-	/* Monitor low power gpio from mdm */
+	/* Start monitoring low power gpio from mdm */
 	irq = MSM_GPIO_TO_INT(vddmin_res->mdm2ap_vddmin_gpio);
 	if (irq < 0) {
 		pr_err("%s: could not get LPM POWER IRQ resource.\n",
@@ -163,6 +173,37 @@
 
 static DECLARE_DELAYED_WORK(mdm2ap_status_check_work, mdm2ap_status_check);
 
+static void mdm_update_gpio_configs(enum gpio_update_config gpio_config)
+{
+	/* Some gpio configurations may need updating after modem bootup. */
+	switch (gpio_config) {
+	case GPIO_UPDATE_RUNNING_CONFIG:
+		if (mdm_drv->pdata->mdm2ap_status_gpio_run_cfg) {
+			if (msm_gpiomux_write(mdm_drv->mdm2ap_status_gpio,
+				GPIOMUX_ACTIVE,
+				mdm_drv->pdata->mdm2ap_status_gpio_run_cfg,
+				&mdm2ap_status_old_config))
+				pr_err("%s: failed updating running gpio config\n",
+					   __func__);
+			else
+				mdm2ap_status_valid_old_config = 1;
+		}
+		break;
+	case GPIO_UPDATE_BOOTING_CONFIG:
+		if (mdm2ap_status_valid_old_config) {
+			msm_gpiomux_write(mdm_drv->mdm2ap_status_gpio,
+					GPIOMUX_ACTIVE,
+					&mdm2ap_status_old_config,
+					NULL);
+			mdm2ap_status_valid_old_config = 0;
+		}
+		break;
+	default:
+		pr_err("%s: called with no config\n", __func__);
+		break;
+	}
+}
+
 long mdm_modem_ioctl(struct file *filp, unsigned int cmd,
 				unsigned long arg)
 {
@@ -205,11 +246,10 @@
 		else
 			first_boot = 0;
 
-		/* Start a timer to check that the mdm2ap_status gpio
-		 * goes high.
+		/* If successful, start a timer to check that the mdm2ap_status
+		 * gpio goes high.
 		 */
-
-		if (gpio_get_value(mdm_drv->mdm2ap_status_gpio) == 0)
+		if (!status && gpio_get_value(mdm_drv->mdm2ap_status_gpio) == 0)
 			schedule_delayed_work(&mdm2ap_status_check_work,
 				msecs_to_jiffies(MDM2AP_STATUS_TIMEOUT_MS));
 		break;
@@ -266,6 +306,9 @@
 	pr_debug("%s: status:%d\n", __func__, value);
 	if (mdm_drv->mdm_ready && mdm_drv->ops->status_cb)
 		mdm_drv->ops->status_cb(mdm_drv, value);
+
+	/* Update gpio configuration to "running" config. */
+	mdm_update_gpio_configs(GPIO_UPDATE_RUNNING_CONFIG);
 }
 
 static DECLARE_WORK(mdm_status_work, mdm_status_fn);
@@ -364,6 +407,7 @@
 static int mdm_subsys_shutdown(const struct subsys_desc *crashed_subsys)
 {
 	mdm_drv->mdm_ready = 0;
+	cancel_delayed_work(&mdm2ap_status_check_work);
 	gpio_direction_output(mdm_drv->ap2mdm_errfatal_gpio, 1);
 	if (mdm_drv->pdata->ramdump_delay_ms > 0) {
 		/* Wait for the external modem to complete
@@ -371,10 +415,13 @@
 		 */
 		msleep(mdm_drv->pdata->ramdump_delay_ms);
 	}
-	if (!mdm_drv->mdm_unexpected_reset_occurred)
+	if (!mdm_drv->mdm_unexpected_reset_occurred) {
 		mdm_drv->ops->reset_mdm_cb(mdm_drv);
-	else
+		/* Update gpio configuration to "booting" config. */
+		mdm_update_gpio_configs(GPIO_UPDATE_BOOTING_CONFIG);
+	} else {
 		mdm_drv->mdm_unexpected_reset_occurred = 0;
+	}
 	return 0;
 }
 
@@ -404,6 +451,7 @@
 				const struct subsys_desc *crashed_subsys)
 {
 	mdm_drv->mdm_ram_dump_status = 0;
+	cancel_delayed_work(&mdm2ap_status_check_work);
 	if (want_dumps) {
 		mdm_drv->boot_type = CHARM_RAM_DUMPS;
 		complete(&mdm_needs_reload);
@@ -416,8 +464,11 @@
 			pr_info("%s: mdm modem ramdumps completed.\n",
 					__func__);
 		INIT_COMPLETION(mdm_ram_dumps);
-		if (!mdm_drv->pdata->no_powerdown_after_ramdumps)
+		if (!mdm_drv->pdata->no_powerdown_after_ramdumps) {
 			mdm_drv->ops->power_down_mdm_cb(mdm_drv);
+			/* Update gpio configuration to "booting" config. */
+			mdm_update_gpio_configs(GPIO_UPDATE_BOOTING_CONFIG);
+		}
 	}
 	return mdm_drv->mdm_ram_dump_status;
 }
@@ -429,23 +480,33 @@
 	.name = EXTERNAL_MODEM,
 };
 
-static int mdm_debug_on_set(void *data, u64 val)
+/* Once the gpios are sent to RPM and debugging
+ * starts, there is no way to stop it without
+ * rebooting the device.
+ */
+static int mdm_debug_mask_set(void *data, u64 val)
 {
-	mdm_debug_on = val;
+	if (!vddmin_gpios_sent &&
+		(val & MDM_DEBUG_MASK_VDDMIN_SETUP)) {
+		mdm_setup_vddmin_gpios();
+		vddmin_gpios_sent = 1;
+	}
+
+	mdm_debug_mask = val;
 	if (mdm_drv->ops->debug_state_changed_cb)
-		mdm_drv->ops->debug_state_changed_cb(mdm_debug_on);
+		mdm_drv->ops->debug_state_changed_cb(mdm_debug_mask);
 	return 0;
 }
 
-static int mdm_debug_on_get(void *data, u64 *val)
+static int mdm_debug_mask_get(void *data, u64 *val)
 {
-	*val = mdm_debug_on;
+	*val = mdm_debug_mask;
 	return 0;
 }
 
-DEFINE_SIMPLE_ATTRIBUTE(mdm_debug_on_fops,
-			mdm_debug_on_get,
-			mdm_debug_on_set, "%llu\n");
+DEFINE_SIMPLE_ATTRIBUTE(mdm_debug_mask_fops,
+			mdm_debug_mask_get,
+			mdm_debug_mask_set, "%llu\n");
 
 static int mdm_debugfs_init(void)
 {
@@ -455,8 +516,8 @@
 	if (IS_ERR(dent))
 		return PTR_ERR(dent);
 
-	debugfs_create_file("debug_on", 0644, dent, NULL,
-			&mdm_debug_on_fops);
+	debugfs_create_file("debug_mask", 0644, dent, NULL,
+			&mdm_debug_mask_fops);
 	return 0;
 }
 
@@ -552,7 +613,7 @@
 
 	mdm_modem_initialize_data(pdev, p_mdm_cb);
 	if (mdm_drv->ops->debug_state_changed_cb)
-		mdm_drv->ops->debug_state_changed_cb(mdm_debug_on);
+		mdm_drv->ops->debug_state_changed_cb(mdm_debug_mask);
 
 	gpio_request(mdm_drv->ap2mdm_status_gpio, "AP2MDM_STATUS");
 	gpio_request(mdm_drv->ap2mdm_errfatal_gpio, "AP2MDM_ERRFATAL");
@@ -688,8 +749,6 @@
 	 */
 	if (mdm_drv->ap2mdm_pmic_pwr_en_gpio > 0)
 		gpio_direction_output(mdm_drv->ap2mdm_pmic_pwr_en_gpio, 1);
-	/* Register VDDmin gpios with RPM */
-	mdm_setup_vddmin_gpios();
 
 	/* Perform early powerup of the external modem in order to
 	 * allow tabla devices to be found.
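
Note on the mdm_common.c hunks above: mdm_update_gpio_configs() switches the MDM2AP_STATUS pad between its boot-time setting and a run-time setting supplied by the board through the new mdm2ap_status_gpio_run_cfg field (added in the mdm2.h hunk earlier). A board-side sketch is below; the surrounding struct is assumed to be mdm_platform_data and the drive/pull values are placeholders, not taken from this patch.

#include <mach/gpiomux.h>
#include <mach/mdm2.h>

/* Illustrative board data: run-time gpiomux setting for MDM2AP_STATUS. */
static struct gpiomux_setting mdm2ap_status_run_cfg = {
	.func = GPIOMUX_FUNC_GPIO,
	.drv  = GPIOMUX_DRV_8MA,
	.pull = GPIOMUX_PULL_NONE,
	.dir  = GPIOMUX_IN,
};

static struct mdm_platform_data example_mdm_pdata = {
	.mdm2ap_status_gpio_run_cfg = &mdm2ap_status_run_cfg,
};
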
diff --git a/arch/arm/mach-msm/mdm_private.h b/arch/arm/mach-msm/mdm_private.h
index 7aba83d..d1e85d3 100644
--- a/arch/arm/mach-msm/mdm_private.h
+++ b/arch/arm/mach-msm/mdm_private.h
@@ -13,6 +13,7 @@
 #ifndef _ARCH_ARM_MACH_MSM_MDM_PRIVATE_H
 #define _ARCH_ARM_MACH_MSM_MDM_PRIVATE_H
 
+#define MDM_DEBUG_MASK_VDDMIN_SETUP (0x00000002)
 struct mdm_modem_drv;
 
 struct mdm_ops {
diff --git a/arch/arm/mach-msm/memory.c b/arch/arm/mach-msm/memory.c
index 63c2d3a..4a2fd7c 100644
--- a/arch/arm/mach-msm/memory.c
+++ b/arch/arm/mach-msm/memory.c
@@ -390,24 +390,33 @@
 	[MEMTYPE_EBI1] = "EBI1",
 };
 
-static int reserve_memory_type(char *mem_name,
-				struct memtype_reserve *reserve_table,
-				int size)
+int msm_get_memory_type_from_name(const char *memtype_name)
 {
 	int i;
 
 	for (i = 0; i < ARRAY_SIZE(memtype_names); i++) {
-		if (memtype_names[i] && strcmp(mem_name,
-				memtype_names[i]) == 0) {
-			reserve_table[i].size += size;
-			return 0;
-		}
+		if (memtype_names[i] &&
+		    strcmp(memtype_name, memtype_names[i]) == 0)
+			return i;
 	}
 
-	pr_err("Could not find memory type %s\n", mem_name);
+	pr_err("Could not find memory type %s\n", memtype_name);
 	return -EINVAL;
 }
 
+static int reserve_memory_type(const char *mem_name,
+				struct memtype_reserve *reserve_table,
+				int size)
+{
+	int ret = msm_get_memory_type_from_name(mem_name);
+
+	if (ret >= 0) {
+		reserve_table[ret].size += size;
+		ret = 0;
+	}
+	return ret;
+}
+
 static int check_for_compat(unsigned long node)
 {
 	char **start = __compat_exports_start;
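
Note on the memory.c hunk above: the lookup over memtype_names is factored out into the exported msm_get_memory_type_from_name(), so callers outside reserve_memory_type() can map a reservation-type string (e.g. "EBI1") to a memtype index. A minimal caller sketch follows; the device-tree property name used here is illustrative only.

#include <linux/of.h>
#include <mach/memory.h>

/* Illustrative caller: resolve a DT reservation-type string to a
 * memtype index (e.g. MEMTYPE_EBI1) using the new helper. */
static int example_get_memtype(struct device_node *node)
{
	const char *name = NULL;

	if (of_property_read_string(node, "qcom,memory-reservation-type",
				    &name))
		return -EINVAL;

	return msm_get_memory_type_from_name(name);
}
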
diff --git a/arch/arm/mach-msm/ocmem.c b/arch/arm/mach-msm/ocmem.c
index 753f6fb..a9c3f4c 100644
--- a/arch/arm/mach-msm/ocmem.c
+++ b/arch/arm/mach-msm/ocmem.c
@@ -24,14 +24,8 @@
 #include <linux/seq_file.h>
 #include <mach/ocmem_priv.h>
 
-/* This code is to temporarily work around the default state of OCMEM
-   regions in Virtio. These registers will be read from DT in a subsequent
-   patch which initializes the regions to appropriate default state.
-*/
-
 #define OCMEM_REGION_CTL_BASE 0xFDD0003C
 #define OCMEM_REGION_CTL_SIZE 0xFD0
-#define REGION_ENABLE 0x00003333
 #define GRAPHICS_REGION_CTL (0x17F000)
 
 struct ocmem_partition {
@@ -269,6 +263,30 @@
 	return i;
 }
 
+#if defined(CONFIG_MSM_OCMEM_LOCAL_POWER_CTRL)
+static int parse_power_ctrl_config(struct ocmem_plat_data *pdata,
+					struct device_node *node)
+{
+	pdata->rpm_pwr_ctrl = false;
+	pdata->rpm_rsc_type = ~0x0;
+	return 0;
+}
+#else
+static int parse_power_ctrl_config(struct ocmem_plat_data *pdata,
+					struct device_node *node)
+{
+	unsigned rsc_type = ~0x0;
+	pdata->rpm_pwr_ctrl = false;
+	if (of_property_read_u32(node, "qcom,resource-type",
+					&rsc_type))
+		return -EINVAL;
+	pdata->rpm_pwr_ctrl = true;
+	pdata->rpm_rsc_type = rsc_type;
+	return 0;
+
+}
+#endif /* CONFIG_MSM_OCMEM_LOCAL_POWER_CTRL */
+
 static struct ocmem_plat_data *parse_dt_config(struct platform_device *pdev)
 {
 	struct device   *dev = &pdev->dev;
@@ -393,6 +411,11 @@
 	} else
 		dev_dbg(dev, "Found %d ocmem partitions\n", nr_parts);
 
+	if (parse_power_ctrl_config(pdata, node)) {
+		dev_err(dev, "No OCMEM RPM Resource specified\n");
+		return NULL;
+	}
+
 	pdata->nr_parts = nr_parts;
 	pdata->parts = parts;
 	pdata->nr_regions = nr_regions;
@@ -516,6 +539,9 @@
 
 	platform_set_drvdata(pdev, ocmem_pdata);
 
+	if (ocmem_core_init(pdev))
+		return -EBUSY;
+
 	if (ocmem_zone_init(pdev))
 		return -EBUSY;
 
@@ -529,10 +555,7 @@
 							OCMEM_REGION_CTL_SIZE);
 	if (!ocmem_region_vbase)
 		return -EBUSY;
-	/* Enable all the 3 regions until we have support for power features */
-	writel_relaxed(REGION_ENABLE, ocmem_region_vbase);
-	writel_relaxed(REGION_ENABLE, ocmem_region_vbase + 4);
-	writel_relaxed(REGION_ENABLE, ocmem_region_vbase + 8);
+
 	/* Enable the ocmem graphics mpU as a workaround in Virtio */
 	/* This will be programmed by TZ after TZ support is integrated */
 	writel_relaxed(GRAPHICS_REGION_CTL, ocmem_region_vbase + 0xFCC);
diff --git a/arch/arm/mach-msm/ocmem_api.c b/arch/arm/mach-msm/ocmem_api.c
index bb32fca..a5aed5e 100644
--- a/arch/arm/mach-msm/ocmem_api.c
+++ b/arch/arm/mach-msm/ocmem_api.c
@@ -399,3 +399,22 @@
 	mutex_unlock(&ocmem_eviction_lock);
 	return ret;
 }
+
+/* Wrappers until power control is transitioned to clients */
+enum ocmem_power_state ocmem_get_power_state(int client_id,
+						struct ocmem_buf *buffer)
+{
+	return 0;
+}
+
+int ocmem_set_power_state(int client_id, struct ocmem_buf *buffer,
+					enum ocmem_power_state new_state)
+{
+	return 0;
+}
+
+struct ocmem_vectors *ocmem_get_vectors(int client_id,
+				struct ocmem_buf *buffer)
+{
+	return NULL;
+}
diff --git a/arch/arm/mach-msm/ocmem_core.c b/arch/arm/mach-msm/ocmem_core.c
new file mode 100644
index 0000000..019f59f
--- /dev/null
+++ b/arch/arm/mach-msm/ocmem_core.c
@@ -0,0 +1,708 @@
+/* Copyright (c) 2012, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/debugfs.h>
+#include <linux/mutex.h>
+#include <linux/kernel.h>
+#include <linux/platform_device.h>
+
+#include <mach/ocmem_priv.h>
+#include <mach/rpm-smd.h>
+
+static unsigned num_regions;
+static unsigned num_macros;
+static unsigned num_ports;
+static unsigned num_banks;
+
+static unsigned long macro_size;
+static unsigned long region_size;
+
+static bool rpm_power_control;
+
+struct ocmem_hw_macro {
+	atomic_t m_on[OCMEM_CLIENT_MAX];
+	atomic_t m_retain[OCMEM_CLIENT_MAX];
+	unsigned m_state;
+};
+
+struct ocmem_hw_region {
+	unsigned psgsc_ctrl;
+	bool interleaved;
+	unsigned int mode;
+	unsigned int num_macros;
+	struct ocmem_hw_macro *macro;
+	struct msm_rpm_request *rpm_req;
+	unsigned r_state;
+};
+
+static struct ocmem_hw_region *region_ctrl;
+static struct mutex region_ctrl_lock;
+static void *ocmem_base;
+
+#define OCMEM_V1_REGIONS 3
+#define OCMEM_V1_MACROS 8
+
+#define OC_HW_VERS (0x0)
+#define OC_HW_PROFILE (0x4)
+#define OC_GEN_STATUS (0xC)
+#define OC_PSGSC_STATUS (0x38)
+#define OC_PSGSC_CTL (0x3C)
+#define OC_REGION_CTL (0x1000)
+
+#define NUM_PORTS_MASK (0xF << 0)
+#define NUM_PORTS_SHIFT (0)
+
+#define NUM_MACROS_MASK (0xF << 8)
+#define NUM_MACROS_SHIFT (8)
+
+#define INTERLEAVING_MASK (0x1 << 17)
+#define INTERLEAVING_SHIFT (17)
+
+/* Power states of each memory macro */
+#define PASSTHROUGH (0x0)
+#define CORE_ON (0x2)
+#define PERI_ON (0x1)
+#define CLK_OFF (0x4)
+#define MACRO_ON (CORE_ON|PERI_ON)
+#define MACRO_SLEEP_RETENTION (CLK_OFF|CORE_ON)
+#define MACRO_SLEEP_RETENTION_PERI_ON (CLK_OFF|MACRO_ON)
+#define MACRO_OFF (CLK_OFF)
+
+#define M_PSCGC_CTL_n(x) (0x7 << (x * 4))
+
+#define PSCGC_CTL_IDX(x) ((x) * 0x4)
+#define PSCGC_CTL_n(x) (OC_PSGSC_CTL + (PSCGC_CTL_IDX(x)))
+
+/* Power states of each ocmem region */
+#define REGION_NORMAL_PASSTHROUGH 0x00000000
+#define REGION_FORCE_PERI_ON 0x00001111
+#define REGION_FORCE_CORE_ON 0x00002222
+#define REGION_FORCE_ALL_ON 0x00003333
+#define REGION_SLEEP_NO_RETENTION 0x00004444
+#define REGION_SLEEP_PERI_OFF 0x00006666
+#define REGION_SLEEP_PERI_ON 0x00007777
+
+#define REGION_DEFAULT_OFF REGION_SLEEP_NO_RETENTION
+#define REGION_DEFAULT_ON REGION_FORCE_ALL_ON
+#define REGION_DEFAULT_RETENTION REGION_SLEEP_PERI_OFF
+
+enum rpm_macro_state {
+	rpm_macro_off = 0x0,
+	rpm_macro_retain,
+	rpm_macro_on,
+};
+
+static int rpm_write(unsigned long val, unsigned id);
+
+static inline unsigned hw_macro_state(unsigned region_state)
+{
+	unsigned macro_state;
+
+	switch (region_state) {
+	case REGION_DEFAULT_ON:
+		macro_state = MACRO_ON;
+		break;
+	case REGION_DEFAULT_OFF:
+		macro_state = MACRO_OFF;
+		break;
+	case REGION_DEFAULT_RETENTION:
+		macro_state = MACRO_SLEEP_RETENTION;
+		break;
+	default:
+		macro_state = MACRO_OFF;
+		break;
+	}
+	return macro_state;
+}
+
+static inline unsigned rpm_macro_state(unsigned hw_macro_state)
+{
+	unsigned macro_state;
+
+	switch (hw_macro_state) {
+	case MACRO_ON:
+		macro_state = rpm_macro_on;
+		break;
+	case MACRO_OFF:
+		macro_state = rpm_macro_off;
+		break;
+	case MACRO_SLEEP_RETENTION:
+		macro_state = rpm_macro_retain;
+		break;
+	default:
+		macro_state = rpm_macro_off;
+		break;
+	}
+	return macro_state;
+}
+
+/* Generic wrapper that sets the region state either by a direct
+ * write or through the appropriate RPM call.
+ */
+/* Must be called with region mutex held */
+static int commit_region_state(unsigned region_num)
+{
+	int rc = -1;
+	unsigned new_state;
+
+	if (region_num >= num_regions)
+		return -EINVAL;
+
+	new_state = region_ctrl[region_num].r_state;
+	pr_debug("ocmem: commit region (%d) new state %x\n", region_num,
+								new_state);
+	if (rpm_power_control)
+		rc = rpm_write(new_state, region_num);
+	else
+		rc = ocmem_write(new_state,
+					ocmem_base + PSCGC_CTL_n(region_num));
+	return rc;
+}
+
+/* Returns the current state of a OCMEM region */
+/* Must be called with region mutex held */
+static int read_region_state(unsigned region_num)
+{
+	int state;
+
+	pr_debug("rpm_get_region_state: #: %d\n", region_num);
+
+	if (region_num >= num_regions)
+		return -EINVAL;
+
+	if (rpm_power_control)
+		state = region_ctrl[region_num].r_state;
+	else
+		state = ocmem_read(ocmem_base + PSCGC_CTL_n(region_num));
+
+	pr_debug("ocmem: region (%d) state %x\n", region_num, state);
+
+	return state;
+}
+
+/* Returns the current state of a OCMEM macro that belongs to a region */
+static int read_macro_state(unsigned region_num, unsigned macro_num)
+{
+	int state;
+
+	if (macro_num >= num_banks)
+		return -EINVAL;
+
+	state = read_region_state(region_num);
+
+	if (state < 0)
+		return -EINVAL;
+
+	state &= M_PSCGC_CTL_n(macro_num);
+	state = state >> (macro_num * 4);
+
+	pr_debug("rpm_get_macro_state: macro (%d) region (%d) state %x\n",
+			macro_num, region_num, state);
+
+	return state;
+}
+
+static int apply_macro_vote(int id, unsigned region_num,
+				unsigned macro_num, int new_state)
+{
+	struct ocmem_hw_macro *m = NULL;
+	struct ocmem_hw_region *region = NULL;
+
+	if (region_num >= num_regions)
+		return -EINVAL;
+
+	if (macro_num >= num_banks)
+		return -EINVAL;
+
+	region = &region_ctrl[region_num];
+
+	m = &region->macro[macro_num];
+
+	pr_debug("m (%d): curr state %x votes (on: %d retain %d) new state %x\n",
+			macro_num, m->m_state,
+			atomic_read(&m->m_on[id]),
+			atomic_read(&m->m_retain[id]),
+			new_state);
+
+	switch (m->m_state) {
+	case MACRO_OFF:
+		if (new_state == MACRO_ON)
+			atomic_inc(&m->m_on[id]);
+		break;
+	case MACRO_ON:
+		if (new_state == MACRO_OFF) {
+			atomic_dec(&m->m_on[id]);
+		} else if (new_state == MACRO_SLEEP_RETENTION) {
+			atomic_inc(&m->m_retain[id]);
+			atomic_dec(&m->m_on[id]);
+		}
+		break;
+	case MACRO_SLEEP_RETENTION:
+		if (new_state == MACRO_OFF) {
+			atomic_dec(&m->m_retain[id]);
+		} else if (new_state == MACRO_ON) {
+			atomic_inc(&m->m_on[id]);
+			atomic_dec(&m->m_retain[id]);
+		}
+		break;
+	}
+
+	pr_debug("macro (%d) region (%d) votes for %d (on: %d retain %d)\n",
+				macro_num, region_num, id,
+				atomic_read(&m->m_on[id]),
+				atomic_read(&m->m_retain[id]));
+	return 0;
+}
+
+static int aggregate_macro_state(unsigned region_num, unsigned macro_num)
+{
+	struct ocmem_hw_macro *m = NULL;
+	struct ocmem_hw_region *region = NULL;
+	int i = 0;
+	/* The default is for the macro to be OFF */
+	unsigned m_state = MACRO_OFF;
+
+	if (region_num >= num_regions)
+		return -EINVAL;
+
+	if (macro_num >= num_banks)
+		return -EINVAL;
+
+	region = &region_ctrl[region_num];
+	m = &region->macro[macro_num];
+
+	for (i = 0; i < OCMEM_CLIENT_MAX; i++) {
+		if (atomic_read(&m->m_on[i]) > 0) {
+			/* at least one client voted for ON state */
+			m_state = MACRO_ON;
+			goto done_aggregation;
+		} else if (atomic_read(&m->m_retain[i]) > 0) {
+			m_state = MACRO_SLEEP_RETENTION;
+			/* continue and examine votes of other clients */
+		}
+	}
+done_aggregation:
+	m->m_state = m_state;
+	pr_debug("macro (%d) region (%d) aggregated state %x\n", macro_num,
+						region_num, m->m_state);
+	return 0;
+}
+
+static int aggregate_region_state(unsigned region_num)
+{
+	struct ocmem_hw_region *region = NULL;
+	unsigned r_state;
+	unsigned i = 0;
+
+	if (region_num >= num_regions)
+		return -EINVAL;
+
+	region = &region_ctrl[region_num];
+	r_state = REGION_DEFAULT_OFF;
+
+	/* In wide mode all macros must have the same state */
+	if (region->mode == WIDE_MODE) {
+		for (i = 0; i < region->num_macros; i++) {
+			if (region->macro[i].m_state == MACRO_ON) {
+				r_state = REGION_DEFAULT_ON;
+				break;
+			} else if (region->macro[i].m_state ==
+						MACRO_SLEEP_RETENTION) {
+				r_state = REGION_DEFAULT_RETENTION;
+			}
+		}
+	} else {
+		/* In thin mode each macro is allowed to be in a different state */
+		/* The region state is simply the collection of all macro states */
+		for (i = 0; i < region->num_macros; i++) {
+			r_state &= ~M_PSCGC_CTL_n(i);
+			r_state |= region->macro[i].m_state << (i * 4);
+		}
+	}
+
+	pr_debug("region (%d) curr state (%x) aggregated state (%x)\n",
+			region_num, region->r_state, r_state);
+	region->r_state = r_state;
+	return 0;
+}
+
+static int rpm_write(unsigned long val, unsigned id)
+{
+	int i = 0;
+	int ret = 0;
+	struct ocmem_hw_region *region;
+
+	region = &region_ctrl[id];
+
+	for (i = 0; i < region->num_macros; i++) {
+		unsigned macro_state;
+		unsigned rpm_state;
+
+		macro_state = read_macro_state(id, i);
+		rpm_state = rpm_macro_state(macro_state);
+
+		if (val == REGION_DEFAULT_ON) {
+			pr_debug("macro (%d) region (%d) -> active\n",
+				i, id);
+			rpm_state = rpm_macro_on;
+		}
+
+		if (val == REGION_DEFAULT_OFF) {
+			pr_debug("macro (%d) region (%d) -> off\n",
+				i, id);
+			rpm_state = rpm_macro_off;
+		}
+
+		ret = msm_rpm_add_kvp_data(region->rpm_req, i,
+						(u8 *) &rpm_state, 4);
+
+		if (ret < 0) {
+			pr_err("ocmem: Error adding key %d val %d on rsc %d\n",
+					i, rpm_state, id);
+			return -EINVAL;
+		}
+	}
+
+	ret = msm_rpm_send_request(region->rpm_req);
+
+	if (ret < 0) {
+		pr_err("ocmem: Error sending RPM request\n");
+		return -EINVAL;
+	}
+
+	pr_debug("Transmit request to rpm for region %d\n", id);
+	return 0;
+}
+
+
+#if defined(CONFIG_MSM_OCMEM_POWER_DEBUG)
+
+static int read_hw_region_state(unsigned region_num)
+{
+	int state;
+
+	pr_debug("rpm_get_region_state: #: %d\n", region_num);
+
+	if (region_num >= num_regions)
+		return -EINVAL;
+
+	state = ocmem_read(ocmem_base + PSCGC_CTL_n(region_num));
+
+	pr_debug("ocmem: region (%d) state %x\n", region_num, state);
+
+	return state;
+}
+
+int ocmem_region_toggle(unsigned int r_num)
+{
+	unsigned reboot_state = ~0x0;
+	unsigned m_num = 0;
+
+	mutex_lock(&region_ctrl_lock);
+	/* Turn on each macro at boot for quick hw sanity check */
+	reboot_state = read_hw_region_state(r_num);
+
+	if (reboot_state != REGION_DEFAULT_OFF) {
+		pr_err("Region %d not in power off state (%x)\n",
+				r_num, reboot_state);
+		goto toggle_fail;
+	}
+
+	for (m_num = 0; m_num < num_banks; m_num++) {
+		apply_macro_vote(0, r_num, m_num, MACRO_ON);
+		aggregate_macro_state(r_num, m_num);
+	}
+
+	aggregate_region_state(r_num);
+	commit_region_state(r_num);
+
+	reboot_state = read_hw_region_state(r_num);
+
+	if (reboot_state != REGION_DEFAULT_ON) {
+		pr_err("Failed to power on Region %d(state:%x)\n",
+				r_num, reboot_state);
+		goto toggle_fail;
+	}
+
+	/* Turn off all memory macros again */
+
+	for (m_num = 0; m_num < num_banks; m_num++) {
+		apply_macro_vote(0, r_num, m_num, MACRO_OFF);
+		aggregate_macro_state(r_num, m_num);
+	}
+
+	aggregate_region_state(r_num);
+	commit_region_state(r_num);
+
+	reboot_state = read_hw_region_state(r_num);
+
+	if (reboot_state != REGION_DEFAULT_OFF) {
+		pr_err("Failed to power off Region %d(state:%x)\n",
+				r_num, reboot_state);
+		goto toggle_fail;
+	}
+	mutex_unlock(&region_ctrl_lock);
+	return 0;
+
+toggle_fail:
+	mutex_unlock(&region_ctrl_lock);
+	return -EINVAL;
+}
+
+int memory_is_off(unsigned int num)
+{
+	if (read_hw_region_state(num) == REGION_DEFAULT_OFF)
+		return 1;
+	else
+		return 0;
+}
+
+#else
+int ocmem_region_toggle(unsigned int region_num)
+{
+	return 0;
+}
+
+int memory_is_off(unsigned int num)
+{
+	return 0;
+}
+#endif /* CONFIG_MSM_OCMEM_POWER_DEBUG */
+
+/* Memory Macro Power Transition Sequences
+ * Normal to Sleep With Retention:
+ *	REGION_DEFAULT_ON -> REGION_DEFAULT_RETENTION
+ * Sleep With Retention to Normal:
+ *	REGION_DEFAULT_RETENTION -> REGION_FORCE_CORE_ON -> REGION_DEFAULT_ON
+ * Normal to OFF:
+ *	REGION_DEFAULT_ON -> REGION_DEFAULT_OFF
+ * OFF to Normal:
+ *	REGION_DEFAULT_OFF -> REGION_DEFAULT_ON
+ */
+
+static int switch_power_state(int id, unsigned long offset, unsigned long len,
+			unsigned new_state)
+{
+	unsigned region_start = num_regions;
+	unsigned region_end = num_regions;
+	unsigned curr_state = 0x0;
+	int i = 0;
+	int j = 0;
+	unsigned start_m = num_banks;
+	unsigned end_m = num_banks;
+	unsigned long region_offset = 0;
+
+	if (offset < 0)
+		return -EINVAL;
+
+	if (len < macro_size)
+		return -EINVAL;
+
+
+	pr_debug("ocmem: power_transition to %x for client %d\n", new_state,
+							id);
+
+	region_start = offset / region_size;
+	region_end = (offset + len - 1) / region_size;
+
+	pr_debug("ocmem: region start %u end %u\n", region_start, region_end);
+
+	if (region_start >= num_regions ||
+		(region_end >= num_regions))
+			return -EINVAL;
+
+	mutex_lock(&region_ctrl_lock);
+
+	for (i = region_start; i <= region_end; i++) {
+
+		curr_state = read_region_state(i);
+
+		switch (curr_state) {
+		case REGION_DEFAULT_OFF:
+			if (new_state != REGION_DEFAULT_ON)
+				goto invalid_transition;
+			break;
+		case REGION_DEFAULT_RETENTION:
+			if (new_state != REGION_DEFAULT_ON)
+				goto invalid_transition;
+			break;
+		default:
+			break;
+		}
+
+		if (len >= region_size) {
+			pr_debug("switch: entire region (%d)\n", i);
+			start_m = 0;
+			end_m = num_banks;
+		} else {
+			region_offset = offset - (i * region_size);
+			start_m = region_offset / macro_size;
+			end_m = (region_offset + len - 1) / macro_size;
+			pr_debug("switch: macro (%u to %u)\n", start_m, end_m);
+		}
+
+		for (j = start_m; j <= end_m; j++) {
+			pr_debug("vote: macro (%d) region (%d)\n", j, i);
+			apply_macro_vote(id, i, j,
+				hw_macro_state(new_state));
+			aggregate_macro_state(i, j);
+		}
+		aggregate_region_state(i);
+		commit_region_state(i);
+		len -= region_size;
+
+		/* If we voted ON/retain the banks must never be OFF */
+		if (new_state != REGION_DEFAULT_OFF) {
+			if (memory_is_off(i)) {
+				pr_err("ocmem: Accessing memory during sleep\n");
+				WARN_ON(1);
+			}
+		}
+
+	}
+	mutex_unlock(&region_ctrl_lock);
+	return 0;
+invalid_transition:
+	mutex_unlock(&region_ctrl_lock);
+	pr_err("ocmem_core: Invalid state transition detected for %d\n", id);
+	pr_err("ocmem_core: Offset %lx Len %lx curr_state %x new_state %x\n",
+			offset, len, curr_state, new_state);
+	WARN_ON(1);
+	return -EINVAL;
+}
+
+/* Interfaces invoked from the scheduler */
+int ocmem_memory_off(int id, unsigned long offset, unsigned long len)
+{
+	return switch_power_state(id, offset, len, REGION_DEFAULT_OFF);
+}
+
+int ocmem_memory_on(int id, unsigned long offset, unsigned long len)
+{
+	return switch_power_state(id, offset, len, REGION_DEFAULT_ON);
+}
+
+int ocmem_memory_retain(int id, unsigned long offset, unsigned long len)
+{
+	return switch_power_state(id, offset, len, REGION_DEFAULT_RETENTION);
+}
+
+int ocmem_core_init(struct platform_device *pdev)
+{
+	struct device   *dev = &pdev->dev;
+	struct ocmem_plat_data *pdata = NULL;
+	unsigned hw_ver;
+	bool interleaved;
+	unsigned i, j, k;
+	unsigned rsc_type = 0;
+
+	pdata = platform_get_drvdata(pdev);
+	ocmem_base = pdata->reg_base;
+
+	hw_ver = ocmem_read(ocmem_base + OC_HW_PROFILE);
+
+	if (pdata->nr_regions != OCMEM_V1_REGIONS) {
+		pr_err("Invalid number of regions (%d)\n", pdata->nr_regions);
+		goto hw_not_supported;
+	}
+
+	num_macros = (hw_ver & NUM_MACROS_MASK) >> NUM_MACROS_SHIFT;
+	num_ports = (hw_ver & NUM_PORTS_MASK) >> NUM_PORTS_SHIFT;
+
+	if (num_macros != OCMEM_V1_MACROS) {
+		pr_err("Invalid number of macros (%d)\n", num_macros);
+		goto hw_not_supported;
+	}
+
+	interleaved = (hw_ver & INTERLEAVING_MASK) >> INTERLEAVING_SHIFT;
+
+	if (interleaved == false) {
+		pr_err("Interleaving is disabled\n");
+		goto hw_not_supported;
+	}
+
+	num_regions = pdata->nr_regions;
+
+	pdata->interleaved = true;
+	pdata->nr_macros = num_macros;
+	pdata->nr_ports = num_ports;
+	macro_size = SZ_64K;
+	region_size = macro_size * num_ports;
+	num_banks = num_ports / 2;
+	rsc_type = pdata->rpm_rsc_type;
+
+	pr_debug("ocmem_core: ports %d regions %d macros %d interleaved %d\n",
+				num_ports, num_regions, num_macros,
+				interleaved);
+
+	region_ctrl = devm_kzalloc(dev, sizeof(struct ocmem_hw_region)
+					 * num_regions, GFP_KERNEL);
+
+	if (!region_ctrl) {
+		pr_err("ocmem: Unable to allocate memory\n");
+		return -EINVAL;
+	}
+
+	mutex_init(&region_ctrl_lock);
+
+	for (i = 0 ; i < num_regions; i++) {
+		struct ocmem_hw_region *region = &region_ctrl[i];
+		struct msm_rpm_request *req = NULL;
+		region->interleaved = interleaved;
+		region->mode = MODE_DEFAULT;
+		region->r_state = REGION_DEFAULT_OFF;
+		region->num_macros = num_banks;
+
+		region->macro = devm_kzalloc(dev,
+					sizeof(struct ocmem_hw_macro) *
+						num_banks, GFP_KERNEL);
+		if (!region->macro) {
+			pr_err("ocmem: Unable to allocate memory\n");
+			return -EINVAL;
+		}
+
+		for (j = 0; j < num_banks; j++) {
+			struct ocmem_hw_macro *m = &region->macro[j];
+			m->m_state = MACRO_OFF;
+			for (k = 0; k < OCMEM_CLIENT_MAX; k++) {
+				atomic_set(&m->m_on[k], 0);
+				atomic_set(&m->m_retain[k], 0);
+			}
+		}
+
+		if (pdata->rpm_pwr_ctrl) {
+			rpm_power_control = true;
+			req = msm_rpm_create_request(MSM_RPM_CTX_ACTIVE_SET,
+					rsc_type, i, num_banks);
+
+			if (!req) {
+				pr_err("Unable to create RPM request\n");
+				return -EINVAL;
+			}
+
+			pr_debug("rpm request type %x (rsc: %d) with %d elements\n",
+						rsc_type, i, num_banks);
+
+			region->rpm_req = req;
+		}
+
+		if (ocmem_region_toggle(i)) {
+			pr_err("Failed to verify region %d\n", i);
+			goto hw_not_supported;
+		}
+
+	}
+	return 0;
+hw_not_supported:
+	pr_err("Unsupported OCMEM h/w configuration %x\n", hw_ver);
+	return -EINVAL;
+}
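
Note on the new ocmem_core.c above: it counts per-client votes per macro (apply_macro_vote/aggregate_macro_state), collapses them into a region state, and commits that state either by a direct PSCGC register write or through an RPM request when power control is delegated to the RPM. The ocmem_sched.c hunks below drive it through ocmem_memory_on()/ocmem_memory_off(); a minimal sketch of that calling pattern, using only names from this patch, is:

#include <mach/ocmem_priv.h>

/* Sketch of the scheduler-side pattern used below: translate the
 * request's physical start to an OCMEM offset, then vote its macros on. */
static int example_power_on_request(struct ocmem_req *req)
{
	unsigned long offset = phys_to_offset(req->req_start);
	int rc = ocmem_memory_on(req->owner, offset, req->req_sz);

	if (rc < 0)
		pr_err("Failed to switch ON memory macros\n");
	return rc;
}
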
diff --git a/arch/arm/mach-msm/ocmem_rdm.c b/arch/arm/mach-msm/ocmem_rdm.c
index 6b93d04..5649021 100644
--- a/arch/arm/mach-msm/ocmem_rdm.c
+++ b/arch/arm/mach-msm/ocmem_rdm.c
@@ -109,19 +109,6 @@
 	unsigned int ctrl;
 } dm_table[RDM_MAX_ENTRIES];
 
-/* Wrapper that will shadow these values later */
-static int ocmem_read(void *at)
-{
-	return readl_relaxed(at);
-}
-
-/* Wrapper that will shadow these values later */
-static int ocmem_write(unsigned long val, void *at)
-{
-	writel_relaxed(val, at);
-	return 0;
-}
-
 static inline int client_ctrl_id(int id)
 {
 	return (id == OCMEM_SENSORS) ? 1 : 0;
diff --git a/arch/arm/mach-msm/ocmem_sched.c b/arch/arm/mach-msm/ocmem_sched.c
index f6d066d..70e6860 100644
--- a/arch/arm/mach-msm/ocmem_sched.c
+++ b/arch/arm/mach-msm/ocmem_sched.c
@@ -1083,6 +1083,7 @@
 static int process_grow(struct ocmem_req *req)
 {
 	int rc = 0;
+	unsigned long offset = 0;
 
 	/* Attempt to grow the region */
 	rc = do_grow(req);
@@ -1097,6 +1098,15 @@
 			return -EINVAL;
 	}
 
+	offset = phys_to_offset(req->req_start);
+
+	rc = ocmem_memory_on(req->owner, offset, req->req_sz);
+
+	if (rc < 0) {
+		pr_err("Failed to switch ON memory macros\n");
+		goto power_ctl_error;
+	}
+
 	/* Notify the client about the buffer growth */
 	rc = dispatch_notification(req->owner, OCMEM_ALLOC_GROW, req->buffer);
 	if (rc < 0) {
@@ -1105,6 +1115,8 @@
 		BUG();
 	}
 	return 0;
+power_ctl_error:
+	return -EINVAL;
 }
 
 static int do_shrink(struct ocmem_req *req, unsigned long shrink_size)
@@ -1187,6 +1199,7 @@
 {
 	struct ocmem_req *req = NULL;
 	struct ocmem_buf *buffer = NULL;
+	unsigned long offset = 0;
 	int rc = 0;
 
 	if (is_blocked(id)) {
@@ -1211,6 +1224,20 @@
 			return -EINVAL;
 	}
 
+
+	if (req->req_sz != 0) {
+		offset = phys_to_offset(req->req_start);
+		rc = ocmem_memory_off(req->owner, offset, req->req_sz);
+		if (rc < 0) {
+			pr_err("Failed to switch OFF memory macros\n");
+			return -EINVAL;
+		}
+	}
+
 	rc = do_free(req);
 
 	if (rc < 0)
@@ -1244,7 +1271,6 @@
 		event = (rc == 0) ? OCMEM_MAP_DONE : OCMEM_MAP_FAIL;
 	else
 		event = (rc == 0) ? OCMEM_UNMAP_DONE : OCMEM_UNMAP_FAIL;
-
 	up_write(&req->rw_sem);
 	kfree(work_data);
 	dispatch_notification(id, event, buffer);
@@ -1300,6 +1326,7 @@
 		goto transfer_out_error;
 	}
 
+
 	return 0;
 
 transfer_out_error:
@@ -1538,6 +1565,7 @@
 	struct ocmem_req *req = NULL;
 	struct ocmem_buf *buffer = NULL;
 	int rc = 0;
+	unsigned long offset = 0;
 
 	/* sanity checks */
 	if (is_blocked(id)) {
@@ -1579,8 +1607,21 @@
 			goto map_error;
 	}
 
+	if (req->req_sz != 0) {
+		offset = phys_to_offset(req->req_start);
+		rc = ocmem_memory_on(req->owner, offset, req->req_sz);
+		if (rc < 0) {
+			pr_err("Failed to switch ON memory macros\n");
+			goto power_ctl_error;
+		}
+	}
+
 	return 0;
 
+power_ctl_error:
 map_error:
 	handle->req = NULL;
 	do_free(req);
@@ -1595,6 +1636,7 @@
 	struct ocmem_handle *handle = NULL;
 	int rc = 0;
 	int id = req->owner;
+	unsigned long offset = 0;
 
 	handle = req_to_handle(req);
 	BUG_ON(handle == NULL);
@@ -1611,6 +1653,18 @@
 			goto map_error;
 	}
 
+	if (req->req_sz != 0) {
+		offset = phys_to_offset(req->req_start);
+		rc = ocmem_memory_on(req->owner, offset, req->req_sz);
+		if (rc < 0) {
+			pr_err("Failed to switch ON memory macros\n");
+			goto power_ctl_error;
+		}
+	}
+
 	/* Notify the client about the buffer growth */
 	rc = dispatch_notification(id, OCMEM_ALLOC_GROW, req->buffer);
 	if (rc < 0) {
@@ -1620,6 +1674,7 @@
 	}
 	return 0;
 
+power_ctl_error:
 map_error:
 	handle->req = NULL;
 	do_free(req);
diff --git a/arch/arm/mach-msm/pil-pronto.c b/arch/arm/mach-msm/pil-pronto.c
index 8897cb5..01cdb0b 100644
--- a/arch/arm/mach-msm/pil-pronto.c
+++ b/arch/arm/mach-msm/pil-pronto.c
@@ -207,11 +207,12 @@
 	mb();
 	usleep_range(1000, 2000);
 
-	/* Deassert reset to Pronto */
+	/* Deassert reset to subsystem and wait for propagation */
 	reg = readl_relaxed(drv->reset_base);
 	reg &= ~CLK_CTL_WCNSS_RESTART_BIT;
 	writel_relaxed(reg, drv->reset_base);
 	mb();
+	udelay(2);
 
 	return 0;
 }
diff --git a/arch/arm/mach-msm/pil-q6v5-mss.c b/arch/arm/mach-msm/pil-q6v5-mss.c
index ff0e792d..5c9c3c4 100644
--- a/arch/arm/mach-msm/pil-q6v5-mss.c
+++ b/arch/arm/mach-msm/pil-q6v5-mss.c
@@ -237,7 +237,7 @@
 	if (IS_ERR(drv->vreg))
 		return PTR_ERR(drv->vreg);
 
-	ret = regulator_set_voltage(drv->vreg, 1150000, 1150000);
+	ret = regulator_set_voltage(drv->vreg, 1050000, 1050000);
 	if (ret)
 		dev_err(&pdev->dev, "Failed to set regulator's voltage.\n");
 
diff --git a/drivers/bluetooth/hci_ath.c b/drivers/bluetooth/hci_ath.c
index 2bb82ce..b117309 100644
--- a/drivers/bluetooth/hci_ath.c
+++ b/drivers/bluetooth/hci_ath.c
@@ -274,8 +274,6 @@
 
 static void bluesleep_tx_timer_expire(unsigned long data)
 {
-	unsigned long irq_flags;
-
 	if (!test_bit(BT_SLEEPENABLE, &flags))
 		return;
 	BT_DBG("Tx timer expired");
diff --git a/drivers/char/diag/diagchar.h b/drivers/char/diag/diagchar.h
index 95a85f2a..0febaf3 100644
--- a/drivers/char/diag/diagchar.h
+++ b/drivers/char/diag/diagchar.h
@@ -251,6 +251,7 @@
 	int logging_mode;
 	int mask_check;
 	int logging_process_id;
+	struct task_struct *socket_process;
 #ifdef CONFIG_DIAG_SDIO_PIPE
 	unsigned char *buf_in_sdio;
 	unsigned char *usb_buf_mdm_out;
diff --git a/drivers/char/diag/diagchar_core.c b/drivers/char/diag/diagchar_core.c
index 240a514..30504bc 100644
--- a/drivers/char/diag/diagchar_core.c
+++ b/drivers/char/diag/diagchar_core.c
@@ -230,6 +230,13 @@
 	* This call will remove any pending registrations of such client
 	*/
 	diagchar_ioctl(NULL, DIAG_IOCTL_DCI_DEINIT, 0);
+
+	/* If the exiting process is the socket process */
+	if (driver->socket_process &&
+		(driver->socket_process->tgid == current->tgid)) {
+		driver->socket_process = NULL;
+	}
+
 #ifdef CONFIG_DIAG_OVER_USB
 	/* If the SD logging process exits, change logging to USB mode */
 	if (driver->logging_process_id == current->tgid) {
@@ -344,6 +351,7 @@
 	void *temp_buf;
 	uint16_t support_list = 0;
 	struct dci_notification_tbl *notify_params;
+	int status;
 
 	if (iocmd == DIAG_IOCTL_COMMAND_REG) {
 		struct bindpkt_params_per_process *pkt_params =
@@ -480,12 +488,32 @@
 		mutex_lock(&driver->diagchar_mutex);
 		temp = driver->logging_mode;
 		driver->logging_mode = (int)ioarg;
-		if (driver->logging_mode == MEMORY_DEVICE_MODE)
+		if (driver->logging_mode == MEMORY_DEVICE_MODE) {
 			driver->mask_check = 1;
+			if (driver->socket_process) {
+				/*
+				 * Notify the socket logging process that we
+				 * are switching to MEMORY_DEVICE_MODE
+				 */
+				status = send_sig(SIGCONT,
+					 driver->socket_process, 0);
+				if (status)
+					pr_err("diag: %s, Error notifying socket process, status: %d\n",
+						__func__, status);
+			}
+		}
 		if (driver->logging_mode == UART_MODE) {
 			driver->mask_check = 0;
 			driver->logging_mode = MEMORY_DEVICE_MODE;
 		}
+		if (driver->logging_mode == SOCKET_MODE) {
+			driver->socket_process = current;
+			driver->mask_check = 0;
+			driver->logging_mode = MEMORY_DEVICE_MODE;
+		}
 		driver->logging_process_id = current->tgid;
 		mutex_unlock(&driver->diagchar_mutex);
 		if (temp == MEMORY_DEVICE_MODE && driver->logging_mode
@@ -1251,6 +1279,7 @@
 		driver->poolsize_write_struct = poolsize_write_struct;
 		driver->num_clients = max_clients;
 		driver->logging_mode = USB_MODE;
+		driver->socket_process = NULL;
 		driver->mask_check = 0;
 		mutex_init(&driver->diagchar_mutex);
 		init_waitqueue_head(&driver->wait_q);
diff --git a/drivers/gpu/ion/msm/msm_ion.c b/drivers/gpu/ion/msm/msm_ion.c
index eec3fe0..c65a000 100644
--- a/drivers/gpu/ion/msm/msm_ion.c
+++ b/drivers/gpu/ion/msm/msm_ion.c
@@ -18,15 +18,82 @@
 #include <linux/slab.h>
 #include <linux/memory_alloc.h>
 #include <linux/fmem.h>
+#include <linux/of.h>
 #include <mach/ion.h>
 #include <mach/msm_memtypes.h>
 #include "../ion_priv.h"
 #include "ion_cp_common.h"
 
+#define ION_COMPAT_STR	"qcom,msm-ion"
+#define ION_COMPAT_MEM_RESERVE_STR "qcom,msm-ion-reserve"
+
 static struct ion_device *idev;
 static int num_heaps;
 static struct ion_heap **heaps;
 
+struct ion_heap_desc {
+	unsigned int id;
+	enum ion_heap_type type;
+	const char *name;
+	unsigned int permission_type;
+};
+
+static struct ion_heap_desc ion_heap_meta[] = {
+	{
+		.id	= ION_SYSTEM_HEAP_ID,
+		.type	= ION_HEAP_TYPE_SYSTEM,
+		.name	= ION_VMALLOC_HEAP_NAME,
+	},
+	{
+		.id	= ION_CP_MM_HEAP_ID,
+		.type	= ION_HEAP_TYPE_CP,
+		.name	= ION_MM_HEAP_NAME,
+		.permission_type = IPT_TYPE_MM_CARVEOUT,
+	},
+	{
+		.id	= ION_MM_FIRMWARE_HEAP_ID,
+		.type	= ION_HEAP_TYPE_CARVEOUT,
+		.name	= ION_MM_FIRMWARE_HEAP_NAME,
+	},
+	{
+		.id	= ION_CP_MFC_HEAP_ID,
+		.type	= ION_HEAP_TYPE_CP,
+		.name	= ION_MFC_HEAP_NAME,
+		.permission_type = IPT_TYPE_MFC_SHAREDMEM,
+	},
+	{
+		.id	= ION_SF_HEAP_ID,
+		.type	= ION_HEAP_TYPE_CARVEOUT,
+		.name	= ION_SF_HEAP_NAME,
+	},
+	{
+		.id	= ION_IOMMU_HEAP_ID,
+		.type	= ION_HEAP_TYPE_IOMMU,
+		.name	= ION_IOMMU_HEAP_NAME,
+	},
+	{
+		.id	= ION_QSECOM_HEAP_ID,
+		.type	= ION_HEAP_TYPE_CARVEOUT,
+		.name	= ION_QSECOM_HEAP_NAME,
+	},
+	{
+		.id	= ION_AUDIO_HEAP_ID,
+		.type	= ION_HEAP_TYPE_CARVEOUT,
+		.name	= ION_AUDIO_HEAP_NAME,
+	},
+	{
+		.id	= ION_CP_WB_HEAP_ID,
+		.type	= ION_HEAP_TYPE_CP,
+		.name	= ION_WB_HEAP_NAME,
+	},
+	{
+		.id	= ION_CAMERA_HEAP_ID,
+		.type	= ION_HEAP_TYPE_CARVEOUT,
+		.name	= ION_CAMERA_HEAP_NAME,
+	},
+};
+
 struct ion_client *msm_ion_client_create(unsigned int heap_mask,
 					const char *name)
 {
@@ -269,11 +336,243 @@
 	}
 }
 
+static int msm_init_extra_data(struct ion_platform_heap *heap,
+			       const struct ion_heap_desc *heap_desc)
+{
+	int ret = 0;
+
+	switch (heap->type) {
+	case ION_HEAP_TYPE_CP:
+	{
+		heap->extra_data = kzalloc(sizeof(struct ion_cp_heap_pdata),
+					   GFP_KERNEL);
+		if (!heap->extra_data) {
+			ret = -ENOMEM;
+		} else {
+			struct ion_cp_heap_pdata *extra = heap->extra_data;
+			extra->permission_type = heap_desc->permission_type;
+		}
+		break;
+	}
+	case ION_HEAP_TYPE_CARVEOUT:
+	{
+		heap->extra_data = kzalloc(sizeof(struct ion_co_heap_pdata),
+					   GFP_KERNEL);
+		if (!heap->extra_data)
+			ret = -ENOMEM;
+		break;
+	}
+	default:
+		heap->extra_data = NULL;
+		break;
+	}
+	return ret;
+}
+
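+/*
+ * Fill in the name, type and type-specific extra data of a device-tree
+ * heap by looking up its ID in the static ion_heap_meta table.
+ */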
+static int msm_ion_populate_heap(struct ion_platform_heap *heap)
+{
+	unsigned int i;
+	int ret = -EINVAL;
+	unsigned int len = ARRAY_SIZE(ion_heap_meta);
+	for (i = 0; i < len; ++i) {
+		if (ion_heap_meta[i].id == heap->id) {
+			heap->name = ion_heap_meta[i].name;
+			heap->type = ion_heap_meta[i].type;
+			ret = msm_init_extra_data(heap, &ion_heap_meta[i]);
+			break;
+		}
+	}
+	if (ret)
+		pr_err("%s: Unable to populate heap, error: %d", __func__, ret);
+	return ret;
+}
+
+static void free_pdata(const struct ion_platform_data *pdata)
+{
+	unsigned int i;
+	for (i = 0; i < pdata->nr; ++i)
+		kfree(pdata->heaps[i].extra_data);
+	kfree(pdata);
+}
+
+static int memtype_to_ion_memtype[] = {
+	[MEMTYPE_SMI_KERNEL] = ION_SMI_TYPE,
+	[MEMTYPE_SMI]	= ION_SMI_TYPE,
+	[MEMTYPE_EBI0] = ION_EBI_TYPE,
+	[MEMTYPE_EBI1] = ION_EBI_TYPE,
+};
+
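+/*
+ * Apply the optional "qcom,heap-align" property; only CP and carveout
+ * heaps carry an alignment field in their extra data.
+ */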
+static void msm_ion_get_heap_align(struct device_node *node,
+				   struct ion_platform_heap *heap)
+{
+	unsigned int val;
+
+	int ret = of_property_read_u32(node, "qcom,heap-align", &val);
+	if (!ret) {
+		switch (heap->type) {
+		case ION_HEAP_TYPE_CP:
+		{
+			struct ion_cp_heap_pdata *extra =
+						heap->extra_data;
+			extra->align = val;
+			break;
+		}
+		case ION_HEAP_TYPE_CARVEOUT:
+		{
+			struct ion_co_heap_pdata *extra =
+						heap->extra_data;
+			extra->align = val;
+			break;
+		}
+		default:
+			pr_err("ION-heap %s: Cannot specify alignment for this type of heap\n",
+					heap->name);
+			break;
+		}
+	}
+}
+
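+/*
+ * Read the optional "qcom,memory-reservation-size" property. A non-zero
+ * size must be accompanied by a "qcom,memory-reservation-type" string,
+ * which is mapped to an ION memory type.
+ */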
+static int msm_ion_get_heap_size(struct device_node *node,
+				 struct ion_platform_heap *heap)
+{
+	unsigned int val;
+	int mem_type;
+	int ret = 0;
+	const char *memory_name_prop;
+
+	ret = of_property_read_u32(node, "qcom,memory-reservation-size", &val);
+	if (!ret) {
+		heap->size = val;
+		ret = of_property_read_string(node,
+					      "qcom,memory-reservation-type",
+					      &memory_name_prop);
+
+		if (!ret && memory_name_prop) {
+			mem_type = msm_get_memory_type_from_name(
+						memory_name_prop);
+			if (mem_type < 0) {
+				ret = -EINVAL;
+				goto out;
+			}
+			heap->memory_type = memtype_to_ion_memtype[mem_type];
+		}
+		if (heap->size && (ret || !memory_name_prop)) {
+			pr_err("%s: Need to specify reservation type\n",
+				__func__);
+			ret = -EINVAL;
+		}
+	} else {
+		ret = 0;
+	}
+out:
+	return ret;
+}
+
+static void msm_ion_get_heap_adjacent(struct device_node *node,
+				      struct ion_platform_heap *heap)
+{
+	unsigned int val;
+	int ret = of_property_read_u32(node, "qcom,heap-adjacent", &val);
+	if (!ret) {
+		switch (heap->type) {
+		case ION_HEAP_TYPE_CARVEOUT:
+		{
+			struct ion_co_heap_pdata *extra = heap->extra_data;
+			extra->adjacent_mem_id = val;
+			break;
+		}
+		default:
+			pr_err("ION-heap %s: Cannot specify adjcent mem id for this type of heap\n",
+				heap->name);
+			break;
+		}
+	} else {
+		switch (heap->type) {
+		case ION_HEAP_TYPE_CARVEOUT:
+		{
+			struct ion_co_heap_pdata *extra = heap->extra_data;
+			extra->adjacent_mem_id = INVALID_HEAP_ID;
+			break;
+		}
+		default:
+			break;
+		}
+	}
+}
+
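+/*
+ * Build an ion_platform_data structure from the device tree: one
+ * ion_platform_heap per child node, identified by its "reg" property.
+ */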
+static struct ion_platform_data *msm_ion_parse_dt(
+					const struct device_node *dt_node)
+{
+	struct ion_platform_data *pdata = NULL;
+	struct device_node *node;
+	uint32_t val = 0;
+	int ret = 0;
+	uint32_t num_heaps = 0;
+	int idx = 0;
+
+	for_each_child_of_node(dt_node, node)
+		num_heaps++;
+
+	if (!num_heaps)
+		return ERR_PTR(-EINVAL);
+
+	pdata = kzalloc(sizeof(struct ion_platform_data) +
+			num_heaps*sizeof(struct ion_platform_heap), GFP_KERNEL);
+	if (!pdata)
+		return ERR_PTR(-ENOMEM);
+
+	pdata->nr = num_heaps;
+
+	for_each_child_of_node(dt_node, node) {
+		/*
+		 * TODO: Replace this with of_get_address() when this patch
+		 * gets merged: http://
+		 * permalink.gmane.org/gmane.linux.drivers.devicetree/18614
+		 */
+		ret = of_property_read_u32(node, "reg", &val);
+		if (ret) {
+			pr_err("%s: Unable to find reg key", __func__);
+			goto free_heaps;
+		}
+		pdata->heaps[idx].id = val;
+
+		ret = msm_ion_populate_heap(&pdata->heaps[idx]);
+		if (ret)
+			goto free_heaps;
+
+		msm_ion_get_heap_align(node, &pdata->heaps[idx]);
+
+		ret = msm_ion_get_heap_size(node, &pdata->heaps[idx]);
+		if (ret)
+			goto free_heaps;
+
+		msm_ion_get_heap_adjacent(node, &pdata->heaps[idx]);
+
+		++idx;
+	}
+	return pdata;
+
+free_heaps:
+	free_pdata(pdata);
+	return ERR_PTR(ret);
+}
+
 static int msm_ion_probe(struct platform_device *pdev)
 {
-	struct ion_platform_data *pdata = pdev->dev.platform_data;
-	int err;
+	struct ion_platform_data *pdata;
+	unsigned int pdata_needs_to_be_freed;
+	int err = -1;
 	int i;
+	if (pdev->dev.of_node) {
+		pdata = msm_ion_parse_dt(pdev->dev.of_node);
+		if (IS_ERR(pdata)) {
+			err = PTR_ERR(pdata);
+			goto out;
+		}
+		pdata_needs_to_be_freed = 1;
+	} else {
+		pdata = pdev->dev.platform_data;
+		pdata_needs_to_be_freed = 0;
+	}
 
 	num_heaps = pdata->nr;
 
@@ -315,6 +614,8 @@
 
 		ion_device_add_heap(idev, heaps[i]);
 	}
+	if (pdata_needs_to_be_freed)
+		free_pdata(pdata);
 
 	check_for_heap_overlap(pdata->heaps, num_heaps);
 	platform_set_drvdata(pdev, idev);
@@ -322,6 +623,8 @@
 
 freeheaps:
 	kfree(heaps);
+	if (pdata_needs_to_be_freed)
+		free_pdata(pdata);
 out:
 	return err;
 }
@@ -339,10 +642,19 @@
 	return 0;
 }
 
+static struct of_device_id msm_ion_match_table[] = {
+	{.compatible = ION_COMPAT_STR},
+	{},
+};
+EXPORT_COMPAT(ION_COMPAT_MEM_RESERVE_STR);
+
 static struct platform_driver msm_ion_driver = {
 	.probe = msm_ion_probe,
 	.remove = msm_ion_remove,
-	.driver = { .name = "ion-msm" }
+	.driver = {
+		.name = "ion-msm",
+		.of_match_table = msm_ion_match_table,
+	},
 };
 
 static int __init msm_ion_init(void)
diff --git a/drivers/gpu/msm/a3xx_reg.h b/drivers/gpu/msm/a3xx_reg.h
index 8ec9431..33fcbfd 100644
--- a/drivers/gpu/msm/a3xx_reg.h
+++ b/drivers/gpu/msm/a3xx_reg.h
@@ -248,6 +248,8 @@
 #define A3XX_VBIF_OUT_WR_LIM_CONF0 0x3035
 #define A3XX_VBIF_DDR_OUT_MAX_BURST 0x3036
 #define A3XX_VBIF_ARB_CTL 0x303C
+#define A3XX_VBIF_ROUND_ROBIN_QOS_ARB 0x3049
+#define A3XX_VBIF_OUT_AXI_AMEMTYPE_CONF0 0x3058
 #define A3XX_VBIF_OUT_AXI_AOOO_EN 0x305E
 #define A3XX_VBIF_OUT_AXI_AOOO 0x305F
 
diff --git a/drivers/gpu/msm/adreno.c b/drivers/gpu/msm/adreno.c
index 3047693..f7d1e59 100644
--- a/drivers/gpu/msm/adreno.c
+++ b/drivers/gpu/msm/adreno.c
@@ -15,8 +15,14 @@
 #include <linux/vmalloc.h>
 #include <linux/ioctl.h>
 #include <linux/sched.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
 
 #include <mach/socinfo.h>
+#include <mach/msm_bus_board.h>
+#include <mach/msm_bus.h>
+#include <mach/msm_dcvs.h>
+#include <mach/msm_dcvs_scm.h>
 
 #include "kgsl.h"
 #include "kgsl_pwrscale.h"
@@ -178,7 +184,9 @@
 	{ ADRENO_REV_A320, 3, 2, 0, ANY_ID,
 		"a300_pm4.fw", "a300_pfp.fw", &adreno_a3xx_gpudev,
 		512, 0, 2, SZ_512K },
-
+	{ ADRENO_REV_A330, 3, 3, 0, 0,
+		"a330_pm4.fw", "a330_pfp.fw", &adreno_a3xx_gpudev,
+		512, 0, 2, SZ_1M },
 };
 
 static irqreturn_t adreno_irq_handler(struct kgsl_device *device)
@@ -647,12 +655,520 @@
 	adreno_dev->gmem_size = adreno_gpulist[i].gmem_size;
 }
 
+static struct platform_device_id adreno_id_table[] = {
+	{ DEVICE_3D0_NAME, (kernel_ulong_t)&device_3d0.dev, },
+	{},
+};
+
+MODULE_DEVICE_TABLE(platform, adreno_id_table);
+
+static struct of_device_id adreno_match_table[] = {
+	{ .compatible = "qcom,kgsl-3d0", },
+	{}
+};
+
+static inline int adreno_of_read_property(struct device_node *node,
+	const char *prop, unsigned int *ptr)
+{
+	int ret = of_property_read_u32(node, prop, ptr);
+	if (ret)
+		KGSL_CORE_ERR("Unable to read '%s'\n", prop);
+	return ret;
+}
+
+static struct device_node *adreno_of_find_subnode(struct device_node *parent,
+	const char *name)
+{
+	struct device_node *child;
+
+	for_each_child_of_node(parent, child) {
+		if (of_device_is_compatible(child, name))
+			return child;
+	}
+
+	return NULL;
+}
+
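+/*
+ * Parse the "qcom,gpu-pwrlevels" subnode: each child supplies a power
+ * level index ("reg") along with its GPU frequency, bus frequency and
+ * IO fraction; "qcom,initial-pwrlevel" selects the starting level.
+ */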
+static int adreno_of_get_pwrlevels(struct device_node *parent,
+	struct kgsl_device_platform_data *pdata)
+{
+	struct device_node *node, *child;
+	int ret = -EINVAL;
+
+	node = adreno_of_find_subnode(parent, "qcom,gpu-pwrlevels");
+
+	if (node == NULL) {
+		KGSL_CORE_ERR("Unable to find 'qcom,gpu-pwrlevels'\n");
+		return -EINVAL;
+	}
+
+	pdata->num_levels = 0;
+
+	for_each_child_of_node(node, child) {
+		unsigned int index;
+		struct kgsl_pwrlevel *level;
+
+		if (adreno_of_read_property(child, "reg", &index))
+			goto done;
+
+		if (index >= KGSL_MAX_PWRLEVELS) {
+			KGSL_CORE_ERR("Pwrlevel index %d is out of range\n",
+				index);
+			continue;
+		}
+
+		if (index >= pdata->num_levels)
+			pdata->num_levels = index + 1;
+
+		level = &pdata->pwrlevel[index];
+
+		if (adreno_of_read_property(child, "qcom,gpu-freq",
+			&level->gpu_freq))
+			goto done;
+
+		if (adreno_of_read_property(child, "qcom,bus-freq",
+			&level->bus_freq))
+			goto done;
+
+		if (adreno_of_read_property(child, "qcom,io-fraction",
+			&level->io_fraction))
+			level->io_fraction = 0;
+	}
+
+	if (adreno_of_read_property(parent, "qcom,initial-pwrlevel",
+		&pdata->init_level))
+		pdata->init_level = 1;
+
+	if (pdata->init_level < 0 || pdata->init_level >= pdata->num_levels) {
+		KGSL_CORE_ERR("Initial power level out of range\n");
+		pdata->init_level = 1;
+	}
+
+	ret = 0;
+done:
+	return ret;
+}
+
+static void adreno_of_free_bus_scale_info(struct msm_bus_scale_pdata *pdata)
+{
+	int i;
+
+	if (pdata == NULL)
+		return;
+
+	for (i = 0; pdata->usecase && i < pdata->num_usecases; i++)
+		kfree(pdata->usecase[i].vectors);
+
+	kfree(pdata->usecase);
+	kfree(pdata);
+}
+
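+/*
+ * Build the GPU bus scaling table from the flattened "qcom,grp3d-vectors"
+ * property; each vector is four cells: source index, destination index,
+ * arbitrated bandwidth and instantaneous bandwidth (converted to MBps).
+ */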
+struct msm_bus_scale_pdata *adreno_of_get_bus_scale(struct device_node *node)
+{
+	static int bus_vectors_src[3] = {MSM_BUS_MASTER_GRAPHICS_3D,
+		MSM_BUS_MASTER_GRAPHICS_3D_PORT1, MSM_BUS_MASTER_V_OCMEM_GFX3D};
+	static int bus_vectors_dst[2] = {MSM_BUS_SLAVE_EBI_CH0,
+		MSM_BUS_SLAVE_OCMEM};
+	const unsigned int *vectors;
+	struct msm_bus_scale_pdata *pdata;
+	int i, j, len, num_paths;
+	int ret = -EINVAL;
+
+	pdata = kzalloc(sizeof(*pdata), GFP_KERNEL);
+
+	if (!pdata) {
+		KGSL_CORE_ERR("kzalloc(%d) failed\n", sizeof(*pdata));
+		return ERR_PTR(-ENOMEM);
+	}
+
+	if (adreno_of_read_property(node, "qcom,grp3d-num-bus-scale-usecases",
+		&pdata->num_usecases)) {
+		pdata->num_usecases = 0;
+		goto err;
+	}
+
+	pdata->usecase = kzalloc(pdata->num_usecases *
+		sizeof(struct msm_bus_paths), GFP_KERNEL);
+
+	if (pdata->usecase == NULL) {
+		KGSL_CORE_ERR("kzalloc (%d) failed\n",
+			pdata->num_usecases * sizeof(struct msm_bus_paths));
+		ret = -ENOMEM;
+		goto err;
+	}
+
+	if (adreno_of_read_property(node, "qcom,grp3d-num-vectors-per-usecase",
+		&num_paths))
+		goto err;
+
+	vectors = of_get_property(node, "qcom,grp3d-vectors", &len);
+
+	if (len != pdata->num_usecases * num_paths *
+		sizeof(struct msm_bus_vectors)) {
+		KGSL_CORE_ERR("Invalid size for the bus scale vectors\n");
+		goto err;
+	}
+
+	for (i = 0; i < pdata->num_usecases; i++) {
+		pdata->usecase[i].num_paths = num_paths;
+		pdata->usecase[i].vectors = kzalloc(num_paths *
+						sizeof(struct msm_bus_vectors),
+						GFP_KERNEL);
+		if (!pdata->usecase[i].vectors) {
+			KGSL_CORE_ERR("kzalloc(%d) failed\n",
+				num_paths * sizeof(struct msm_bus_vectors));
+			ret = -ENOMEM;
+			goto err;
+		}
+		for (j = 0; j < num_paths; j++) {
+			int index = (i * num_paths + j) * 4;
+			pdata->usecase[i].vectors[j].src =
+				bus_vectors_src[be32_to_cpu(vectors[index])];
+			pdata->usecase[i].vectors[j].dst =
+				bus_vectors_dst[
+					be32_to_cpu(vectors[index + 1])];
+			pdata->usecase[i].vectors[j].ab =
+				be32_to_cpu(vectors[index + 2]);
+			pdata->usecase[i].vectors[j].ib =
+				KGSL_CONVERT_TO_MBPS(
+					be32_to_cpu(vectors[index + 3]));
+		}
+	}
+
+	pdata->name = "grp3d";
+
+	return pdata;
+
+err:
+	adreno_of_free_bus_scale_info(pdata);
+
+	return ERR_PTR(ret);
+}
+
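+/*
+ * Parse the "qcom,dcvs-core-info" subnode into an msm_dcvs_core_info:
+ * one frequency table entry per child node plus the core and algorithm
+ * parameters read from the subnode itself.
+ */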
+static struct msm_dcvs_core_info *adreno_of_get_dcvs(struct device_node *parent)
+{
+	struct device_node *node, *child;
+	struct msm_dcvs_core_info *info = NULL;
+	int count = 0;
+	int ret = -EINVAL;
+
+	node = adreno_of_find_subnode(parent, "qcom,dcvs-core-info");
+	if (node == NULL)
+		return ERR_PTR(-EINVAL);
+
+	info = kzalloc(sizeof(*info), GFP_KERNEL);
+
+	if (info == NULL) {
+		KGSL_CORE_ERR("kzalloc(%d) failed\n", sizeof(*info));
+		ret = -ENOMEM;
+		goto err;
+	}
+
+	for_each_child_of_node(node, child)
+		count++;
+
+	info->core_param.num_freq = count;
+
+	info->freq_tbl = kzalloc(info->core_param.num_freq *
+			sizeof(struct msm_dcvs_freq_entry),
+			GFP_KERNEL);
+
+	if (info->freq_tbl == NULL) {
+		KGSL_CORE_ERR("kzalloc(%d) failed\n",
+			info->core_param.num_freq *
+			sizeof(struct msm_dcvs_freq_entry));
+		ret = -ENOMEM;
+		goto err;
+	}
+
+	for_each_child_of_node(node, child) {
+		unsigned int index;
+
+		if (adreno_of_read_property(child, "reg", &index))
+			goto err;
+
+		if (index >= info->core_param.num_freq) {
+			KGSL_CORE_ERR("DCVS freq entry %d is out of range\n",
+				index);
+			continue;
+		}
+
+		if (adreno_of_read_property(child, "qcom,freq",
+			&info->freq_tbl[index].freq))
+			goto err;
+
+		if (adreno_of_read_property(child, "qcom,idle-energy",
+			&info->freq_tbl[index].idle_energy))
+			info->freq_tbl[index].idle_energy = 0;
+
+		if (adreno_of_read_property(child, "qcom,active-energy",
+			&info->freq_tbl[index].active_energy))
+			info->freq_tbl[index].active_energy = 0;
+	}
+
+	if (adreno_of_read_property(node, "qcom,core-max-time-us",
+		&info->core_param.max_time_us))
+		goto err;
+
+	if (adreno_of_read_property(node, "qcom,algo-slack-time-us",
+		&info->algo_param.slack_time_us))
+		goto err;
+
+	if (adreno_of_read_property(node, "qcom,algo-disable-pc-threshold",
+		&info->algo_param.disable_pc_threshold))
+		goto err;
+
+	if (adreno_of_read_property(node, "qcom,algo-ss-window-size",
+		&info->algo_param.ss_window_size))
+		goto err;
+
+	if (adreno_of_read_property(node, "qcom,algo-ss-util-pct",
+		&info->algo_param.ss_util_pct))
+		goto err;
+
+	if (adreno_of_read_property(node, "qcom,algo-em-max-util-pct",
+		&info->algo_param.em_max_util_pct))
+		goto err;
+
+	if (adreno_of_read_property(node, "qcom,algo-ss-iobusy-conv",
+		&info->algo_param.ss_iobusy_conv))
+		goto err;
+
+	return info;
+
+err:
+	if (info)
+		kfree(info->freq_tbl);
+
+	kfree(info);
+
+	return ERR_PTR(ret);
+}
+
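+/*
+ * Resolve the "iommu" phandle and record its register range along with
+ * the context banks (label and SID) described by its child nodes.
+ */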
+static int adreno_of_get_iommu(struct device_node *parent,
+	struct kgsl_device_platform_data *pdata)
+{
+	struct device_node *node, *child;
+	struct kgsl_device_iommu_data *data = NULL;
+	struct kgsl_iommu_ctx *ctxs = NULL;
+	u32 reg_val[2];
+	int ctx_index = 0;
+
+	node = of_parse_phandle(parent, "iommu", 0);
+	if (node == NULL)
+		return -EINVAL;
+
+	data = kzalloc(sizeof(*data), GFP_KERNEL);
+	if (data == NULL) {
+		KGSL_CORE_ERR("kzalloc(%d) failed\n", sizeof(*data));
+		goto err;
+	}
+
+	if (of_property_read_u32_array(node, "reg", reg_val, 2))
+		goto err;
+
+	data->physstart = reg_val[0];
+	data->physend = data->physstart + reg_val[1] - 1;
+
+	data->iommu_ctx_count = 0;
+
+	for_each_child_of_node(node, child)
+		data->iommu_ctx_count++;
+
+	ctxs = kzalloc(data->iommu_ctx_count * sizeof(struct kgsl_iommu_ctx),
+		GFP_KERNEL);
+
+	if (ctxs == NULL) {
+		KGSL_CORE_ERR("kzalloc(%d) failed\n",
+			data->iommu_ctx_count * sizeof(struct kgsl_iommu_ctx));
+		goto err;
+	}
+
+	for_each_child_of_node(node, child) {
+		int ret = of_property_read_string(child, "label",
+				&ctxs[ctx_index].iommu_ctx_name);
+
+		if (ret) {
+			KGSL_CORE_ERR("Unable to read KGSL IOMMU 'label'\n");
+			goto err;
+		}
+
+		if (adreno_of_read_property(child, "qcom,iommu-ctx-sids",
+			&ctxs[ctx_index].ctx_id))
+			goto err;
+
+		ctx_index++;
+	}
+
+	data->iommu_ctxs = ctxs;
+
+	pdata->iommu_data = data;
+	pdata->iommu_count = 1;
+
+	return 0;
+
+err:
+	kfree(ctxs);
+	kfree(data);
+
+	return -EINVAL;
+}
+
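+/*
+ * Assemble kgsl_device_platform_data from the device tree when no legacy
+ * board-file platform data has been supplied.
+ */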
+static int adreno_of_get_pdata(struct platform_device *pdev)
+{
+	struct kgsl_device_platform_data *pdata = NULL;
+	struct kgsl_device *device;
+	int ret = -EINVAL;
+
+	pdev->id_entry = adreno_id_table;
+
+	pdata = pdev->dev.platform_data;
+	if (pdata)
+		return 0;
+
+	if (of_property_read_string(pdev->dev.of_node, "label", &pdev->name)) {
+		KGSL_CORE_ERR("Unable to read 'label'\n");
+		goto err;
+	}
+
+	if (adreno_of_read_property(pdev->dev.of_node, "qcom,id", &pdev->id))
+		goto err;
+
+	pdata = kzalloc(sizeof(*pdata), GFP_KERNEL);
+	if (pdata == NULL) {
+		KGSL_CORE_ERR("kzalloc(%d) failed\n", sizeof(*pdata));
+		ret = -ENOMEM;
+		goto err;
+	}
+
+	if (adreno_of_read_property(pdev->dev.of_node, "qcom,chipid",
+		&pdata->chipid))
+		goto err;
+
+	/* pwrlevel Data */
+	ret = adreno_of_get_pwrlevels(pdev->dev.of_node, pdata);
+	if (ret)
+		goto err;
+
+	/* Default value is 83, if not found in DT */
+	if (adreno_of_read_property(pdev->dev.of_node, "qcom,idle-timeout",
+		&pdata->idle_timeout))
+		pdata->idle_timeout = 83;
+
+	if (adreno_of_read_property(pdev->dev.of_node, "qcom,nap-allowed",
+		&pdata->nap_allowed))
+		pdata->nap_allowed = 1;
+
+	if (adreno_of_read_property(pdev->dev.of_node, "qcom,clk-map",
+		&pdata->clk_map))
+		goto err;
+
+	device = (struct kgsl_device *)pdev->id_entry->driver_data;
+
+	if (device->id != KGSL_DEVICE_3D0)
+		goto err;
+
+	/* Bus Scale Data */
+
+	pdata->bus_scale_table = adreno_of_get_bus_scale(pdev->dev.of_node);
+	if (IS_ERR_OR_NULL(pdata->bus_scale_table)) {
+		ret = PTR_ERR(pdata->bus_scale_table);
+		goto err;
+	}
+
+	pdata->core_info = adreno_of_get_dcvs(pdev->dev.of_node);
+	if (IS_ERR_OR_NULL(pdata->core_info)) {
+		ret = PTR_ERR(pdata->core_info);
+		goto err;
+	}
+
+	ret = adreno_of_get_iommu(pdev->dev.of_node, pdata);
+	if (ret)
+		goto err;
+
+	pdev->dev.platform_data = pdata;
+	return 0;
+
+err:
+	if (pdata) {
+		adreno_of_free_bus_scale_info(pdata->bus_scale_table);
+		if (pdata->core_info)
+			kfree(pdata->core_info->freq_tbl);
+		kfree(pdata->core_info);
+
+		if (pdata->iommu_data)
+			kfree(pdata->iommu_data->iommu_ctxs);
+
+		kfree(pdata->iommu_data);
+	}
+
+	kfree(pdata);
+
+	return ret;
+}
+
+#ifdef CONFIG_MSM_OCMEM
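+/*
+ * On A330 the GMEM is backed by OCMEM: allocate the buffer once and point
+ * gmem_base/gmem_size at it; other GPU revisions are left untouched.
+ */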
+static int
+adreno_ocmem_gmem_malloc(struct adreno_device *adreno_dev)
+{
+	if (adreno_dev->gpurev != ADRENO_REV_A330)
+		return 0;
+
+	/* OCMEM is only needed once; consecutive allocations are not supported */
+	if (adreno_dev->ocmem_hdl != NULL)
+		return 0;
+
+	adreno_dev->ocmem_hdl =
+		ocmem_allocate(OCMEM_GRAPHICS, adreno_dev->gmem_size);
+	if (adreno_dev->ocmem_hdl == NULL)
+		return -ENOMEM;
+
+	adreno_dev->gmem_size = adreno_dev->ocmem_hdl->len;
+	adreno_dev->gmem_base = adreno_dev->ocmem_hdl->addr;
+
+	return 0;
+}
+
+static void
+adreno_ocmem_gmem_free(struct adreno_device *adreno_dev)
+{
+	if (adreno_dev->gpurev != ADRENO_REV_A330)
+		return;
+
+	if (adreno_dev->ocmem_hdl == NULL)
+		return;
+
+	ocmem_free(OCMEM_GRAPHICS, adreno_dev->ocmem_hdl);
+	adreno_dev->ocmem_hdl = NULL;
+}
+#else
+static int
+adreno_ocmem_gmem_malloc(struct adreno_device *adreno_dev)
+{
+	return 0;
+}
+
+static void
+adreno_ocmem_gmem_free(struct adreno_device *adreno_dev)
+{
+}
+#endif
+
 static int __devinit
 adreno_probe(struct platform_device *pdev)
 {
 	struct kgsl_device *device;
 	struct adreno_device *adreno_dev;
 	int status = -EINVAL;
+	bool is_dt;
+
+	is_dt = of_match_device(adreno_match_table, &pdev->dev);
+
+	if (is_dt && pdev->dev.of_node) {
+		status = adreno_of_get_pdata(pdev);
+		if (status)
+			goto error_return;
+	}
 
 	device = (struct kgsl_device *)pdev->id_entry->driver_data;
 	adreno_dev = ADRENO_DEVICE(device);
@@ -678,6 +1194,7 @@
 	adreno_ringbuffer_close(&adreno_dev->ringbuffer);
 error:
 	device->parentdev = NULL;
+error_return:
 	return status;
 }
 
@@ -740,6 +1257,12 @@
 	if (status)
 		goto error_clk_off;
 
+	status = adreno_ocmem_gmem_malloc(adreno_dev);
+	if (status) {
+		KGSL_DRV_ERR(device, "OCMEM malloc failed\n");
+		goto error_mmu_off;
+	}
+
 	/* Start the GPU */
 	adreno_dev->gpudev->start(adreno_dev);
 
@@ -756,7 +1279,10 @@
 	}
 
 	kgsl_pwrctrl_irq(device, KGSL_PWRFLAGS_OFF);
+
+error_mmu_off:
 	kgsl_mmu_stop(&device->mmu);
+
 error_clk_off:
 	kgsl_pwrctrl_disable(device);
 
@@ -777,6 +1303,8 @@
 	kgsl_pwrctrl_irq(device, KGSL_PWRFLAGS_OFF);
 	del_timer_sync(&device->idle_timer);
 
+	adreno_ocmem_gmem_free(adreno_dev);
+
 	/* Power down the device */
 	kgsl_pwrctrl_disable(device);
 
@@ -1590,9 +2118,8 @@
 			cmds[1] = 0;
 
 			if (adreno_dev->drawctxt_active)
-				adreno_ringbuffer_issuecmds(device,
-					adreno_dev->drawctxt_active,
-					KGSL_CMD_FLAGS_NONE, &cmds[0], 2);
+				adreno_ringbuffer_issuecmds_intr(device,
+						context, &cmds[0], 2);
 			else
 				/* We would never call this function if there
 				 * was no active contexts running */
@@ -1927,12 +2454,6 @@
 	.setproperty = adreno_setproperty,
 };
 
-static struct platform_device_id adreno_id_table[] = {
-	{ DEVICE_3D0_NAME, (kernel_ulong_t)&device_3d0.dev, },
-	{ },
-};
-MODULE_DEVICE_TABLE(platform, adreno_id_table);
-
 static struct platform_driver adreno_platform_driver = {
 	.probe = adreno_probe,
 	.remove = __devexit_p(adreno_remove),
@@ -1943,6 +2464,7 @@
 		.owner = THIS_MODULE,
 		.name = DEVICE_3D_NAME,
 		.pm = &kgsl_pm_ops,
+		.of_match_table = adreno_match_table,
 	}
 };
 
diff --git a/drivers/gpu/msm/adreno.h b/drivers/gpu/msm/adreno.h
index 57f4859..279e7ed 100644
--- a/drivers/gpu/msm/adreno.h
+++ b/drivers/gpu/msm/adreno.h
@@ -17,6 +17,7 @@
 #include "adreno_drawctxt.h"
 #include "adreno_ringbuffer.h"
 #include "kgsl_iommu.h"
+#include <mach/ocmem.h>
 
 #define DEVICE_3D_NAME "kgsl-3d"
 #define DEVICE_3D0_NAME "kgsl-3d0"
@@ -32,7 +33,7 @@
 /* Flags to control command packet settings */
 #define KGSL_CMD_FLAGS_NONE             0x00000000
 #define KGSL_CMD_FLAGS_PMODE		0x00000001
-#define KGSL_CMD_FLAGS_NO_TS_CMP	0x00000002
+#define KGSL_CMD_FLAGS_DUMMY_INTR_CMD	0x00000002
 
 /* Command identifiers */
 #define KGSL_CONTEXT_TO_MEM_IDENTIFIER	0x2EADBEEF
@@ -61,6 +62,7 @@
 	ADRENO_REV_A225 = 225,
 	ADRENO_REV_A305 = 305,
 	ADRENO_REV_A320 = 320,
+	ADRENO_REV_A330 = 330,
 };
 
 struct adreno_gpudev;
@@ -87,6 +89,7 @@
 	unsigned int instruction_size;
 	unsigned int ib_check_level;
 	unsigned int fast_hang_detect;
+	struct ocmem_buf *ocmem_hdl;
 };
 
 struct adreno_gpudev {
diff --git a/drivers/gpu/msm/adreno_a2xx.c b/drivers/gpu/msm/adreno_a2xx.c
index 86fe3f5..5ba3778 100644
--- a/drivers/gpu/msm/adreno_a2xx.c
+++ b/drivers/gpu/msm/adreno_a2xx.c
@@ -1822,7 +1822,8 @@
 	if (state) {
 		adreno_regwrite(device, REG_RBBM_INT_CNTL, RBBM_INT_MASK);
 		adreno_regwrite(device, REG_CP_INT_CNTL, CP_INT_MASK);
-		adreno_regwrite(device, MH_INTERRUPT_MASK, KGSL_MMU_INT_MASK);
+		adreno_regwrite(device, MH_INTERRUPT_MASK,
+			kgsl_mmu_get_int_mask());
 	} else {
 		adreno_regwrite(device, REG_RBBM_INT_CNTL, 0);
 		adreno_regwrite(device, REG_CP_INT_CNTL, 0);
diff --git a/drivers/gpu/msm/adreno_a3xx.c b/drivers/gpu/msm/adreno_a3xx.c
index bb89067..2dbfd8f 100644
--- a/drivers/gpu/msm/adreno_a3xx.c
+++ b/drivers/gpu/msm/adreno_a3xx.c
@@ -2701,24 +2701,46 @@
 	struct kgsl_device *device = &adreno_dev->dev;
 
 	/* Set up 16 deep read/write request queues */
+	if (adreno_dev->gpurev == ADRENO_REV_A330) {
+		adreno_regwrite(device, A3XX_VBIF_IN_RD_LIM_CONF0, 0x18181818);
+		adreno_regwrite(device, A3XX_VBIF_IN_RD_LIM_CONF1, 0x00001818);
+		adreno_regwrite(device, A3XX_VBIF_OUT_RD_LIM_CONF0, 0x00001818);
+		adreno_regwrite(device, A3XX_VBIF_OUT_WR_LIM_CONF0, 0x00001818);
+		adreno_regwrite(device, A3XX_VBIF_DDR_OUT_MAX_BURST, 0x0000303);
+		adreno_regwrite(device, A3XX_VBIF_IN_WR_LIM_CONF0, 0x18181818);
+		adreno_regwrite(device, A3XX_VBIF_IN_WR_LIM_CONF1, 0x00001818);
+		/* Enable WR-REQ */
+		adreno_regwrite(device, A3XX_VBIF_GATE_OFF_WRREQ_EN, 0x0000FF);
 
-	adreno_regwrite(device, A3XX_VBIF_IN_RD_LIM_CONF0, 0x10101010);
-	adreno_regwrite(device, A3XX_VBIF_IN_RD_LIM_CONF1, 0x10101010);
-	adreno_regwrite(device, A3XX_VBIF_OUT_RD_LIM_CONF0, 0x10101010);
-	adreno_regwrite(device, A3XX_VBIF_OUT_WR_LIM_CONF0, 0x10101010);
-	adreno_regwrite(device, A3XX_VBIF_DDR_OUT_MAX_BURST, 0x00000303);
-	adreno_regwrite(device, A3XX_VBIF_IN_WR_LIM_CONF0, 0x10101010);
-	adreno_regwrite(device, A3XX_VBIF_IN_WR_LIM_CONF1, 0x10101010);
+		/* Set up round robin arbitration between both AXI ports */
+		adreno_regwrite(device, A3XX_VBIF_ARB_CTL, 0x00000030);
+		/* Set up VBIF_ROUND_ROBIN_QOS_ARB */
+		adreno_regwrite(device, A3XX_VBIF_ROUND_ROBIN_QOS_ARB, 0x0001);
 
-	/* Enable WR-REQ */
-	adreno_regwrite(device, A3XX_VBIF_GATE_OFF_WRREQ_EN, 0x000000FF);
+		/* Set up AOOO */
+		adreno_regwrite(device, A3XX_VBIF_OUT_AXI_AOOO_EN, 0x00000FFF);
+		adreno_regwrite(device, A3XX_VBIF_OUT_AXI_AOOO, 0x0FFF0FFF);
 
-	/* Set up round robin arbitration between both AXI ports */
-	adreno_regwrite(device, A3XX_VBIF_ARB_CTL, 0x00000030);
+		/* VBIF AXI AMEMTYPE CONFIG */
+		adreno_regwrite(device, A3XX_VBIF_OUT_AXI_AMEMTYPE_CONF0,
+			0x22222222);
+	} else {
+		adreno_regwrite(device, A3XX_VBIF_IN_RD_LIM_CONF0, 0x10101010);
+		adreno_regwrite(device, A3XX_VBIF_IN_RD_LIM_CONF1, 0x10101010);
+		adreno_regwrite(device, A3XX_VBIF_OUT_RD_LIM_CONF0, 0x10101010);
+		adreno_regwrite(device, A3XX_VBIF_OUT_WR_LIM_CONF0, 0x10101010);
+		adreno_regwrite(device, A3XX_VBIF_DDR_OUT_MAX_BURST, 0x0000303);
+		adreno_regwrite(device, A3XX_VBIF_IN_WR_LIM_CONF0, 0x10101010);
+		adreno_regwrite(device, A3XX_VBIF_IN_WR_LIM_CONF1, 0x10101010);
+		/* Enable WR-REQ */
+		adreno_regwrite(device, A3XX_VBIF_GATE_OFF_WRREQ_EN, 0x0000FF);
 
-	/* Set up AOOO */
-	adreno_regwrite(device, A3XX_VBIF_OUT_AXI_AOOO_EN, 0x0000003C);
-	adreno_regwrite(device, A3XX_VBIF_OUT_AXI_AOOO, 0x003C003C);
+		/* Set up round robin arbitration between both AXI ports */
+		adreno_regwrite(device, A3XX_VBIF_ARB_CTL, 0x00000030);
+		/* Set up AOOO */
+		adreno_regwrite(device, A3XX_VBIF_OUT_AXI_AOOO_EN, 0x0000003C);
+		adreno_regwrite(device, A3XX_VBIF_OUT_AXI_AOOO, 0x003C003C);
+	}
 
 	if (cpu_is_apq8064()) {
 		/* Enable 1K sort */
diff --git a/drivers/gpu/msm/adreno_drawctxt.c b/drivers/gpu/msm/adreno_drawctxt.c
index 098c4f5..6c74dfa 100644
--- a/drivers/gpu/msm/adreno_drawctxt.c
+++ b/drivers/gpu/msm/adreno_drawctxt.c
@@ -147,6 +147,7 @@
 {
 	struct adreno_context *drawctxt;
 	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
+	struct adreno_ringbuffer *rb = &adreno_dev->ringbuffer;
 	int ret;
 
 	drawctxt = kzalloc(sizeof(struct adreno_context), GFP_KERNEL);
@@ -157,6 +158,7 @@
 	drawctxt->pagetable = pagetable;
 	drawctxt->bin_base_offset = 0;
 	drawctxt->id = context->id;
+	rb->timestamp[context->id] = 0;
 
 	if (flags & KGSL_CONTEXT_PREAMBLE)
 		drawctxt->flags |= CTXT_FLAGS_PREAMBLE;
@@ -174,6 +176,12 @@
 	kgsl_sharedmem_writel(&device->memstore,
 			KGSL_MEMSTORE_OFFSET(drawctxt->id, ref_wait_ts),
 			KGSL_INIT_REFTIMESTAMP);
+	kgsl_sharedmem_writel(&device->memstore,
+			KGSL_MEMSTORE_OFFSET(drawctxt->id, ts_cmp_enable), 0);
+	kgsl_sharedmem_writel(&device->memstore,
+			KGSL_MEMSTORE_OFFSET(drawctxt->id, soptimestamp), 0);
+	kgsl_sharedmem_writel(&device->memstore,
+			KGSL_MEMSTORE_OFFSET(drawctxt->id, eoptimestamp), 0);
 
 	context->devctxt = drawctxt;
 	return 0;
diff --git a/drivers/gpu/msm/adreno_ringbuffer.c b/drivers/gpu/msm/adreno_ringbuffer.c
index 2a6e39b..49786ba 100644
--- a/drivers/gpu/msm/adreno_ringbuffer.c
+++ b/drivers/gpu/msm/adreno_ringbuffer.c
@@ -491,9 +491,9 @@
 	*  error checking if needed
 	*/
 	total_sizedwords += flags & KGSL_CMD_FLAGS_PMODE ? 4 : 0;
-	total_sizedwords += !(flags & KGSL_CMD_FLAGS_NO_TS_CMP) ? 7 : 0;
 	/* 2 dwords to store the start of command sequence */
 	total_sizedwords += 2;
+	total_sizedwords += context ? 7 : 0;
 
 	if (adreno_is_a3xx(adreno_dev))
 		total_sizedwords += 7;
@@ -545,9 +545,10 @@
 
 	/* always increment the global timestamp. once. */
 	rb->timestamp[KGSL_MEMSTORE_GLOBAL]++;
-	if (context) {
+
+	if (context && !(flags & KGSL_CMD_FLAGS_DUMMY_INTR_CMD)) {
 		if (context_id == KGSL_MEMSTORE_GLOBAL)
-			rb->timestamp[context_id] =
+			rb->timestamp[context->id] =
 				rb->timestamp[KGSL_MEMSTORE_GLOBAL];
 		else
 			rb->timestamp[context_id]++;
@@ -577,7 +578,7 @@
 		GSL_RB_WRITE(ringcmds, rcmd_gpu,
 			cp_type3_packet(CP_MEM_WRITE, 2));
 		GSL_RB_WRITE(ringcmds, rcmd_gpu, (gpuaddr +
-			KGSL_MEMSTORE_OFFSET(context->id, soptimestamp)));
+			KGSL_MEMSTORE_OFFSET(context_id, soptimestamp)));
 		GSL_RB_WRITE(ringcmds, rcmd_gpu, timestamp);
 
 		/* end-of-pipeline timestamp */
@@ -585,14 +586,14 @@
 			cp_type3_packet(CP_EVENT_WRITE, 3));
 		GSL_RB_WRITE(ringcmds, rcmd_gpu, CACHE_FLUSH_TS);
 		GSL_RB_WRITE(ringcmds, rcmd_gpu, (gpuaddr +
-			KGSL_MEMSTORE_OFFSET(context->id, eoptimestamp)));
+			KGSL_MEMSTORE_OFFSET(context_id, eoptimestamp)));
 		GSL_RB_WRITE(ringcmds, rcmd_gpu, timestamp);
 
 		GSL_RB_WRITE(ringcmds, rcmd_gpu,
 			cp_type3_packet(CP_MEM_WRITE, 2));
 		GSL_RB_WRITE(ringcmds, rcmd_gpu, (gpuaddr +
-			      KGSL_MEMSTORE_OFFSET(KGSL_MEMSTORE_GLOBAL,
-				      eoptimestamp)));
+			KGSL_MEMSTORE_OFFSET(KGSL_MEMSTORE_GLOBAL,
+				eoptimestamp)));
 		GSL_RB_WRITE(ringcmds, rcmd_gpu,
 			rb->timestamp[KGSL_MEMSTORE_GLOBAL]);
 	} else {
@@ -600,13 +601,11 @@
 			cp_type3_packet(CP_EVENT_WRITE, 3));
 		GSL_RB_WRITE(ringcmds, rcmd_gpu, CACHE_FLUSH_TS);
 		GSL_RB_WRITE(ringcmds, rcmd_gpu, (gpuaddr +
-			      KGSL_MEMSTORE_OFFSET(KGSL_MEMSTORE_GLOBAL,
-				      eoptimestamp)));
-		GSL_RB_WRITE(ringcmds, rcmd_gpu,
-			rb->timestamp[KGSL_MEMSTORE_GLOBAL]);
+			KGSL_MEMSTORE_OFFSET(context_id, eoptimestamp)));
+		GSL_RB_WRITE(ringcmds, rcmd_gpu, rb->timestamp[context_id]);
 	}
 
-	if (!(flags & KGSL_CMD_FLAGS_NO_TS_CMP)) {
+	if (context) {
 		/* Conditional execution based on memory values */
 		GSL_RB_WRITE(ringcmds, rcmd_gpu,
 			cp_type3_packet(CP_COND_EXEC, 4));
@@ -638,6 +637,30 @@
 	return timestamp;
 }
 
+void
+adreno_ringbuffer_issuecmds_intr(struct kgsl_device *device,
+						struct kgsl_context *k_ctxt,
+						unsigned int *cmds,
+						int sizedwords)
+{
+	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
+	struct adreno_ringbuffer *rb = &adreno_dev->ringbuffer;
+	struct adreno_context *a_ctxt = NULL;
+
+	if (!k_ctxt)
+		return;
+
+	a_ctxt = k_ctxt->devctxt;
+
+	if (k_ctxt->id == KGSL_CONTEXT_INVALID ||
+		a_ctxt == NULL ||
+		device->state & KGSL_STATE_HUNG)
+		return;
+
+	adreno_ringbuffer_addcmds(rb, a_ctxt, KGSL_CMD_FLAGS_DUMMY_INTR_CMD,
+			cmds, sizedwords);
+}
+
 unsigned int
 adreno_ringbuffer_issuecmds(struct kgsl_device *device,
 						struct adreno_context *drawctxt,
diff --git a/drivers/gpu/msm/adreno_ringbuffer.h b/drivers/gpu/msm/adreno_ringbuffer.h
index 4cc57c2..6c3d9b1 100644
--- a/drivers/gpu/msm/adreno_ringbuffer.h
+++ b/drivers/gpu/msm/adreno_ringbuffer.h
@@ -110,6 +110,11 @@
 					unsigned int *cmdaddr,
 					int sizedwords);
 
+void adreno_ringbuffer_issuecmds_intr(struct kgsl_device *device,
+					struct kgsl_context *k_ctxt,
+					unsigned int *cmdaddr,
+					int sizedwords);
+
 void adreno_ringbuffer_submit(struct adreno_ringbuffer *rb);
 
 void kgsl_cp_intrcallback(struct kgsl_device *device);
diff --git a/drivers/gpu/msm/kgsl_mmu.h b/drivers/gpu/msm/kgsl_mmu.h
index bc6ec8e..5293d66 100644
--- a/drivers/gpu/msm/kgsl_mmu.h
+++ b/drivers/gpu/msm/kgsl_mmu.h
@@ -299,4 +299,14 @@
 		(gpuaddr < (KGSL_PAGETABLE_BASE + kgsl_mmu_get_ptsize())));
 }
 
+static inline unsigned int kgsl_mmu_get_int_mask(void)
+{
+	/* Don't enable gpummu interrupts if the iommu is enabled */
+	if (KGSL_MMU_TYPE_GPU == kgsl_mmu_get_mmutype())
+		return KGSL_MMU_INT_MASK;
+	else
+		return (MH_INTERRUPT_MASK__AXI_READ_ERROR |
+			MH_INTERRUPT_MASK__AXI_WRITE_ERROR);
+}
+
 #endif /* __KGSL_MMU_H */
diff --git a/drivers/gpu/msm/kgsl_pwrctrl.c b/drivers/gpu/msm/kgsl_pwrctrl.c
index bfe6957..6d4d4d3 100644
--- a/drivers/gpu/msm/kgsl_pwrctrl.c
+++ b/drivers/gpu/msm/kgsl_pwrctrl.c
@@ -439,8 +439,8 @@
 		if (test_and_clear_bit(KGSL_PWRFLAGS_POWER_ON,
 			&pwr->power_flags)) {
 			trace_kgsl_rail(device, state);
-			if (pwr->gpu_dig)
-				regulator_disable(pwr->gpu_dig);
+			if (pwr->gpu_cx)
+				regulator_disable(pwr->gpu_cx);
 			if (pwr->gpu_reg)
 				regulator_disable(pwr->gpu_reg);
 		}
@@ -456,8 +456,8 @@
 							"failed: %d\n",
 							status);
 			}
-			if (pwr->gpu_dig) {
-				int status = regulator_enable(pwr->gpu_dig);
+			if (pwr->gpu_cx) {
+				int status = regulator_enable(pwr->gpu_cx);
 				if (status)
 					KGSL_DRV_ERR(device,
 							"cx regulator_enable "
@@ -547,11 +547,11 @@
 		pwr->gpu_reg = NULL;
 
 	if (pwr->gpu_reg) {
-		pwr->gpu_dig = regulator_get(&pdev->dev, "vdd_dig");
-		if (IS_ERR(pwr->gpu_dig))
-			pwr->gpu_dig = NULL;
+		pwr->gpu_cx = regulator_get(&pdev->dev, "vddcx");
+		if (IS_ERR(pwr->gpu_cx))
+			pwr->gpu_cx = NULL;
 	} else
-		pwr->gpu_dig = NULL;
+		pwr->gpu_cx = NULL;
 
 	pwr->power_flags = 0;
 
@@ -615,9 +615,9 @@
 		pwr->gpu_reg = NULL;
 	}
 
-	if (pwr->gpu_dig) {
-		regulator_put(pwr->gpu_dig);
-		pwr->gpu_dig = NULL;
+	if (pwr->gpu_cx) {
+		regulator_put(pwr->gpu_cx);
+		pwr->gpu_cx = NULL;
 	}
 
 	for (i = 1; i < KGSL_MAX_CLKS; i++)
diff --git a/drivers/gpu/msm/kgsl_pwrctrl.h b/drivers/gpu/msm/kgsl_pwrctrl.h
index 954c818..cd44152 100644
--- a/drivers/gpu/msm/kgsl_pwrctrl.h
+++ b/drivers/gpu/msm/kgsl_pwrctrl.h
@@ -50,7 +50,7 @@
 	unsigned int interval_timeout;
 	bool strtstp_sleepwake;
 	struct regulator *gpu_reg;
-	struct regulator *gpu_dig;
+	struct regulator *gpu_cx;
 	uint32_t pcl;
 	unsigned int nap_allowed;
 	unsigned int idle_needed;
diff --git a/drivers/gpu/msm/kgsl_pwrscale_msm.c b/drivers/gpu/msm/kgsl_pwrscale_msm.c
index baa0407..c6f8b1b 100644
--- a/drivers/gpu/msm/kgsl_pwrscale_msm.c
+++ b/drivers/gpu/msm/kgsl_pwrscale_msm.c
@@ -17,6 +17,7 @@
 #include "kgsl_pwrscale.h"
 #include "kgsl_device.h"
 #include "a2xx_reg.h"
+#include "kgsl_trace.h"
 
 struct msm_priv {
 	struct kgsl_device *device;
@@ -92,6 +93,7 @@
 	struct msm_priv *priv = pwrscale->priv;
 	if (priv->enabled && !priv->gpu_busy) {
 		msm_dcvs_idle(priv->handle, MSM_DCVS_IDLE_EXIT, 0);
+		trace_kgsl_mpdcvs(device, 1);
 		priv->gpu_busy = 1;
 	}
 	return;
@@ -105,6 +107,7 @@
 	if (priv->enabled && priv->gpu_busy)
 		if (device->ftbl->isidle(device)) {
 			msm_dcvs_idle(priv->handle, MSM_DCVS_IDLE_ENTER, 0);
+			trace_kgsl_mpdcvs(device, 0);
 			priv->gpu_busy = 0;
 		}
 	return;
@@ -117,6 +120,7 @@
 
 	if (priv->enabled && priv->gpu_busy) {
 		msm_dcvs_idle(priv->handle, MSM_DCVS_IDLE_ENTER, 0);
+		trace_kgsl_mpdcvs(device, 0);
 		priv->gpu_busy = 0;
 	}
 
diff --git a/drivers/gpu/msm/kgsl_trace.h b/drivers/gpu/msm/kgsl_trace.h
index 3eff40f..81ab3fb 100644
--- a/drivers/gpu/msm/kgsl_trace.h
+++ b/drivers/gpu/msm/kgsl_trace.h
@@ -251,6 +251,29 @@
 	)
 );
 
+TRACE_EVENT(kgsl_mpdcvs,
+
+	TP_PROTO(struct kgsl_device *device, unsigned int state),
+
+	TP_ARGS(device, state),
+
+	TP_STRUCT__entry(
+		__string(device_name, device->name)
+		__field(unsigned int, state)
+	),
+
+	TP_fast_assign(
+		__assign_str(device_name, device->name);
+		__entry->state = state;
+	),
+
+	TP_printk(
+		"d_name=%s %s",
+		__get_str(device_name),
+		__entry->state ? "BUSY" : "IDLE"
+	)
+);
+
 DECLARE_EVENT_CLASS(kgsl_pwrstate_template,
 	TP_PROTO(struct kgsl_device *device, unsigned int state),
 
diff --git a/drivers/gpu/msm/z180.c b/drivers/gpu/msm/z180.c
index 6efba45..3504dfc 100644
--- a/drivers/gpu/msm/z180.c
+++ b/drivers/gpu/msm/z180.c
@@ -892,7 +892,8 @@
 
 	if (state) {
 		z180_regwrite(device, (ADDR_VGC_IRQENABLE >> 2), 3);
-		z180_regwrite(device, MH_INTERRUPT_MASK, KGSL_MMU_INT_MASK);
+		z180_regwrite(device, MH_INTERRUPT_MASK,
+			kgsl_mmu_get_int_mask());
 	} else {
 		z180_regwrite(device, (ADDR_VGC_IRQENABLE >> 2), 0);
 		z180_regwrite(device, MH_INTERRUPT_MASK, 0);
diff --git a/drivers/media/video/msm/msm.c b/drivers/media/video/msm/msm.c
index b14d4f6..e5c1091 100644
--- a/drivers/media/video/msm/msm.c
+++ b/drivers/media/video/msm/msm.c
@@ -189,12 +189,25 @@
 {
 	int rc = 0, i, j;
 	struct msm_cam_v4l2_dev_inst *pcam_inst;
+	struct msm_cam_media_controller *pmctl;
+	struct msm_cam_v4l2_device *pcam = video_drvdata(f);
 	pcam_inst = container_of(f->private_data,
 		struct msm_cam_v4l2_dev_inst, eventHandle);
 	D("%s\n", __func__);
 	WARN_ON(pctx != f->private_data);
 
 	mutex_lock(&pcam_inst->inst_lock);
+	if (!pcam_inst->vbqueue_initialized && pb->count) {
+		pmctl = msm_cam_server_get_mctl(pcam->mctl_handle);
+		if (pmctl == NULL) {
+			pr_err("%s Invalid mctl ptr\n", __func__);
+			mutex_unlock(&pcam_inst->inst_lock);
+			return -EINVAL;
+		}
+		pmctl->mctl_vbqueue_init(pcam_inst, &pcam_inst->vid_bufq,
+			pb->type);
+		pcam_inst->vbqueue_initialized = 1;
+	}
+
 	rc = vb2_reqbufs(&pcam_inst->vid_bufq, pb);
 	if (rc < 0) {
 		pr_err("%s reqbufs failed %d ", __func__, rc);
@@ -564,7 +577,6 @@
 	int rc;
 	/* get the video device */
 	struct msm_cam_v4l2_device *pcam  = video_drvdata(f);
-	struct msm_cam_media_controller *pmctl;
 	struct msm_cam_v4l2_dev_inst *pcam_inst;
 	pcam_inst = container_of(f->private_data,
 		struct msm_cam_v4l2_dev_inst, eventHandle);
@@ -575,16 +587,6 @@
 		(void *)pfmt->fmt.pix.priv);
 	WARN_ON(pctx != f->private_data);
 
-	pmctl = msm_cam_server_get_mctl(pcam->mctl_handle);
-	if (pmctl == NULL)
-		return -EINVAL;
-
-	if (!pcam_inst->vbqueue_initialized) {
-		pmctl->mctl_vbqueue_init(pcam_inst, &pcam_inst->vid_bufq,
-					V4L2_BUF_TYPE_VIDEO_CAPTURE);
-		pcam_inst->vbqueue_initialized = 1;
-	}
-
 	mutex_lock(&pcam->vid_lock);
 
 	rc = msm_server_set_fmt(pcam, pcam_inst->my_index, pfmt);
@@ -602,7 +604,6 @@
 {
 	int rc;
 	struct msm_cam_v4l2_device *pcam = video_drvdata(f);
-	struct msm_cam_media_controller *pmctl;
 	struct msm_cam_v4l2_dev_inst *pcam_inst;
 	pcam_inst = container_of(f->private_data,
 			struct msm_cam_v4l2_dev_inst, eventHandle);
@@ -610,16 +611,6 @@
 	D("%s Inst %p\n", __func__, pcam_inst);
 	WARN_ON(pctx != f->private_data);
 
-	pmctl = msm_cam_server_get_mctl(pcam->mctl_handle);
-	if (pmctl == NULL)
-		return -EINVAL;
-
-	if (!pcam_inst->vbqueue_initialized) {
-		pmctl->mctl_vbqueue_init(pcam_inst, &pcam_inst->vid_bufq,
-					V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE);
-		pcam_inst->vbqueue_initialized = 1;
-	}
-
 	mutex_lock(&pcam->vid_lock);
 	rc = msm_server_set_fmt_mplane(pcam, pcam_inst->my_index, pfmt);
 	mutex_unlock(&pcam->vid_lock);
diff --git a/drivers/media/video/msm/msm_mctl.c b/drivers/media/video/msm/msm_mctl.c
index a87b074..a8d74a7 100644
--- a/drivers/media/video/msm/msm_mctl.c
+++ b/drivers/media/video/msm/msm_mctl.c
@@ -130,6 +130,14 @@
 	.pxlcode	= V4L2_MBUS_FMT_SGRBG10_1X10, /* Bayer sensor */
 	.colorspace = V4L2_COLORSPACE_JPEG,
 	},
+	{
+	.name	   = "YUYV",
+	.depth	  = 16,
+	.bitsperpxl = 16,
+	.fourcc	 = V4L2_PIX_FMT_YUYV,
+	.pxlcode	= V4L2_MBUS_FMT_YUYV8_2X8, /* YUV sensor */
+	.colorspace = V4L2_COLORSPACE_JPEG,
+	},
 
 };
 
@@ -853,6 +861,7 @@
 
 	pmctl = msm_cam_server_get_mctl(pcam->mctl_handle);
 	mutex_lock(&pcam->mctl_node.dev_lock);
+	mutex_lock(&pcam_inst->inst_lock);
 	D("%s : active %d ", __func__, pcam->mctl_node.active);
 	if (pcam->mctl_node.active == 1) {
 		rc = msm_cam_server_close_mctl_session(pcam);
@@ -865,6 +874,7 @@
 		pmctl = NULL;
 	}
 	pcam_inst->streamon = 0;
+	pcam->mctl_node.use_count--;
 	pcam->mctl_node.dev_inst_map[pcam_inst->image_mode] = NULL;
 	if (pcam_inst->vbqueue_initialized)
 		vb2_queue_release(&pcam_inst->vid_bufq);
@@ -873,17 +883,16 @@
 	msm_destroy_v4l2_event_queue(&pcam_inst->eventHandle);
 	CLR_MCTLPP_INST_IDX(pcam_inst->inst_handle);
 	CLR_IMG_MODE(pcam_inst->inst_handle);
-
+	mutex_unlock(&pcam_inst->inst_lock);
 	mutex_destroy(&pcam_inst->inst_lock);
 
 	kfree(pcam_inst);
+	f->private_data = NULL;
 	if (NULL != pmctl) {
 		D("%s : release ion client", __func__);
 		kref_put(&pmctl->refcount, msm_release_ion_client);
 	}
-	f->private_data = NULL;
 	mutex_unlock(&pcam->mctl_node.dev_lock);
-	pcam->mctl_node.use_count--;
 	D("%s : use_count %d X ", __func__, pcam->mctl_node.use_count);
 	return rc;
 }
@@ -988,12 +997,24 @@
 {
 	int rc = 0, i, j;
 	struct msm_cam_v4l2_dev_inst *pcam_inst;
+	struct msm_cam_media_controller *pmctl;
+	struct msm_cam_v4l2_device *pcam = video_drvdata(f);
 	pcam_inst = container_of(f->private_data,
 		struct msm_cam_v4l2_dev_inst, eventHandle);
 	D("%s\n", __func__);
 	WARN_ON(pctx != f->private_data);
 
 	mutex_lock(&pcam_inst->inst_lock);
+	if (!pcam_inst->vbqueue_initialized && pb->count) {
+		pmctl = msm_cam_server_get_mctl(pcam->mctl_handle);
+		if (pmctl == NULL) {
+			pr_err("%s Invalid mctl ptr\n", __func__);
+			mutex_unlock(&pcam_inst->inst_lock);
+			return -EINVAL;
+		}
+		pmctl->mctl_vbqueue_init(pcam_inst, &pcam_inst->vid_bufq,
+			pb->type);
+		pcam_inst->vbqueue_initialized = 1;
+	}
 	rc = vb2_reqbufs(&pcam_inst->vid_bufq, pb);
 	if (rc < 0) {
 		pr_err("%s reqbufs failed %d ", __func__, rc);
@@ -1306,30 +1327,10 @@
 					struct v4l2_format *pfmt)
 {
 	int rc = 0;
-	/* get the video device */
-	struct msm_cam_v4l2_device *pcam  = video_drvdata(f);
-	struct msm_cam_media_controller *pmctl;
-	struct msm_cam_v4l2_dev_inst *pcam_inst;
-	pcam_inst = container_of(f->private_data,
-		struct msm_cam_v4l2_dev_inst, eventHandle);
 
 	D("%s\n", __func__);
-	D("%s, inst=0x%x,idx=%d,priv = 0x%p\n",
-		__func__, (u32)pcam_inst, pcam_inst->my_index,
-		(void *)pfmt->fmt.pix.priv);
 	WARN_ON(pctx != f->private_data);
 
-	pmctl = msm_cam_server_get_mctl(pcam->mctl_handle);
-	if (!pmctl) {
-		pr_err("%s mctl ptr is null ", __func__);
-		return -EINVAL;
-	}
-	if (!pcam_inst->vbqueue_initialized) {
-		pmctl->mctl_vbqueue_init(pcam_inst, &pcam_inst->vid_bufq,
-					V4L2_BUF_TYPE_VIDEO_CAPTURE);
-		pcam_inst->vbqueue_initialized = 1;
-	}
-
 	return rc;
 }
 
@@ -1338,25 +1339,13 @@
 {
 	int rc = 0, i;
 	struct msm_cam_v4l2_device *pcam = video_drvdata(f);
-	struct msm_cam_media_controller *pmctl;
 	struct msm_cam_v4l2_dev_inst *pcam_inst;
 	pcam_inst = container_of(f->private_data,
 			struct msm_cam_v4l2_dev_inst, eventHandle);
 
-	D("%s Inst %p vbqueue %d\n", __func__,
-		pcam_inst, pcam_inst->vbqueue_initialized);
+	D("%s Inst %p\n", __func__, pcam_inst);
 	WARN_ON(pctx != f->private_data);
 
-	pmctl = msm_cam_server_get_mctl(pcam->mctl_handle);
-	if (!pmctl) {
-		pr_err("%s mctl ptr is null ", __func__);
-		return -EINVAL;
-	}
-	if (!pcam_inst->vbqueue_initialized) {
-		pmctl->mctl_vbqueue_init(pcam_inst, &pcam_inst->vid_bufq,
-					V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE);
-		pcam_inst->vbqueue_initialized = 1;
-	}
 	for (i = 0; i < pcam->num_fmts; i++)
 		if (pcam->usr_fmts[i].fourcc == pfmt->fmt.pix_mp.pixelformat)
 			break;
diff --git a/drivers/media/video/msm/msm_mctl_buf.c b/drivers/media/video/msm/msm_mctl_buf.c
index a69858a..9f7f689 100644
--- a/drivers/media/video/msm/msm_mctl_buf.c
+++ b/drivers/media/video/msm/msm_mctl_buf.c
@@ -212,11 +212,6 @@
 	pcam = pcam_inst->pcam;
 	buf = container_of(vb, struct msm_frame_buffer, vidbuf);
 
-	pmctl = msm_cam_server_get_mctl(pcam->mctl_handle);
-	if (pmctl == NULL) {
-		pr_err("%s No mctl found\n", __func__);
-		return;
-	}
 
 	if (pcam_inst->vid_fmt.type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
 		for (i = 0; i < vb->num_planes; i++) {
@@ -262,6 +257,12 @@
 		}
 		spin_unlock_irqrestore(&pcam_inst->vq_irqlock, flags);
 	}
+	pmctl = msm_cam_server_get_mctl(pcam->mctl_handle);
+	if (pmctl == NULL) {
+		pr_err("%s No mctl found\n", __func__);
+		buf->state = MSM_BUFFER_STATE_UNUSED;
+		return;
+	}
 	for (i = 0; i < vb->num_planes; i++) {
 		mem = vb2_plane_cookie(vb, i);
 		videobuf2_pmem_contig_user_put(mem, pmctl->client);
diff --git a/drivers/media/video/msm/server/msm_cam_server.c b/drivers/media/video/msm/server/msm_cam_server.c
index 2cc61a1..7d58091 100644
--- a/drivers/media/video/msm/server/msm_cam_server.c
+++ b/drivers/media/video/msm/server/msm_cam_server.c
@@ -1565,11 +1565,16 @@
 	int rc = -EINVAL;
 	uint32_t mctl_handle = 0;
 	struct msm_cam_media_controller *p_mctl = NULL;
+	int is_gesture_evt =
+		(notification == NOTIFY_GESTURE_EVT)
+		|| (notification == NOTIFY_GESTURE_CAM_EVT);
 
-	mctl_handle = msm_camera_server_find_mctl(notification, arg);
-	if (mctl_handle < 0) {
-		pr_err("%s: Couldn't find mctl instance!\n", __func__);
-		return;
+	if (!is_gesture_evt) {
+		mctl_handle = msm_camera_server_find_mctl(notification, arg);
+		if (mctl_handle < 0) {
+			pr_err("%s: Couldn't find mctl instance!\n", __func__);
+			return;
+		}
 	}
 	switch (notification) {
 	case NOTIFY_ISP_MSG_EVT:
@@ -2283,12 +2288,17 @@
 	int *p_active)
 {
 	int rc = 0;
+	int i = 0;
 	struct msm_cam_media_controller *pmctl = NULL;
 	*p_active = 0;
-	if (g_server_dev.pcam_active[pcam->server_queue_idx]) {
-		D("%s: Active camera present return", __func__);
-		return 0;
+
+	for (i = 0; i < MAX_NUM_ACTIVE_CAMERA; i++) {
+		if (g_server_dev.pcam_active[i] != NULL) {
+			pr_info("%s: Active camera present return\n", __func__);
+			return 0;
+		}
 	}
+
 	rc = msm_cam_server_open_session(&g_server_dev, pcam);
 	if (rc < 0) {
 		pr_err("%s: cam_server_open_session failed %d\n",
diff --git a/drivers/media/video/msm/vfe/Makefile b/drivers/media/video/msm/vfe/Makefile
index 8068e4f..91f0e7f 100644
--- a/drivers/media/video/msm/vfe/Makefile
+++ b/drivers/media/video/msm/vfe/Makefile
@@ -16,4 +16,5 @@
 obj-$(CONFIG_ARCH_MSM_ARM11) += msm_vfe7x.o
 obj-$(CONFIG_ARCH_QSD8X50) += msm_vfe8x.o msm_vfe8x_proc.o
 obj-$(CONFIG_ARCH_MSM8960) += msm_vfe32.o
+obj-$(CONFIG_ARCH_MSM8974) += msm_vfe40.o msm_vfe40_axi.o
 obj-$(CONFIG_MSM_CAMERA_V4L2) += msm_vfe_stats_buf.o
diff --git a/drivers/media/video/msm/vfe/msm_vfe32.c b/drivers/media/video/msm/vfe/msm_vfe32.c
index 28b88dd..3e01437 100644
--- a/drivers/media/video/msm/vfe/msm_vfe32.c
+++ b/drivers/media/video/msm/vfe/msm_vfe32.c
@@ -1133,8 +1133,6 @@
 	struct msm_cam_media_controller *pmctl,
 	struct vfe32_ctrl_type *vfe32_ctrl)
 {
-	msm_camio_bus_scale_cfg(
-		pmctl->sdata->pdata->cam_bus_scale_table, S_VIDEO);
 	vfe32_ctrl->share_ctrl->recording_state = VFE_STATE_START_REQUESTED;
 	msm_camera_io_w_mb(1,
 		vfe32_ctrl->share_ctrl->vfebase + VFE_REG_UPDATE_CMD);
@@ -1148,8 +1146,6 @@
 	vfe32_ctrl->share_ctrl->recording_state = VFE_STATE_STOP_REQUESTED;
 	msm_camera_io_w_mb(1,
 		vfe32_ctrl->share_ctrl->vfebase + VFE_REG_UPDATE_CMD);
-	msm_camio_bus_scale_cfg(
-		pmctl->sdata->pdata->cam_bus_scale_table, S_PREVIEW);
 	return 0;
 }
 
@@ -1162,8 +1158,6 @@
 	vfe32_ctrl->share_ctrl->vfe_capture_count =
 		vfe32_ctrl->share_ctrl->outpath.out0.capture_cnt;
 
-	msm_camio_bus_scale_cfg(
-		pmctl->sdata->pdata->cam_bus_scale_table, S_LIVESHOT);
 	vfe32_ctrl->share_ctrl->liveshot_state = VFE_STATE_START_REQUESTED;
 	msm_camera_io_w_mb(1, vfe32_ctrl->
 		share_ctrl->vfebase + VFE_REG_UPDATE_CMD);
@@ -1176,8 +1170,6 @@
 	vfe32_ctrl->share_ctrl->liveshot_state = VFE_STATE_STOP_REQUESTED;
 	msm_camera_io_w_mb(1,
 		vfe32_ctrl->share_ctrl->vfebase + VFE_REG_UPDATE_CMD);
-	msm_camio_bus_scale_cfg(
-		pmctl->sdata->pdata->cam_bus_scale_table, S_VIDEO);
 }
 
 static int vfe32_zsl(
@@ -1185,8 +1177,6 @@
 	struct vfe32_ctrl_type *vfe32_ctrl)
 {
 	vfe32_start_common(vfe32_ctrl);
-	msm_camio_bus_scale_cfg(
-		pmctl->sdata->pdata->cam_bus_scale_table, S_ZSL);
 
 	msm_camera_io_w(1, vfe32_ctrl->share_ctrl->vfebase + 0x18C);
 	msm_camera_io_w(1, vfe32_ctrl->share_ctrl->vfebase + 0x188);
@@ -1199,8 +1189,6 @@
 {
 	vfe32_ctrl->share_ctrl->outpath.out0.capture_cnt = num_frames_capture;
 	vfe32_ctrl->share_ctrl->vfe_capture_count = num_frames_capture;
-	msm_camio_bus_scale_cfg(
-		pmctl->sdata->pdata->cam_bus_scale_table, S_CAPTURE);
 	vfe32_start_common(vfe32_ctrl);
 	return 0;
 }
@@ -1228,9 +1216,6 @@
 
 	vfe32_ctrl->share_ctrl->vfe_capture_count = num_frames_capture;
 
-	msm_camio_bus_scale_cfg(
-		pmctl->sdata->pdata->cam_bus_scale_table, S_CAPTURE);
-
 	vfe32_start_common(vfe32_ctrl);
 	/* for debug */
 	msm_camera_io_w(1, vfe32_ctrl->share_ctrl->vfebase + 0x18C);
@@ -1242,8 +1227,6 @@
 	struct msm_cam_media_controller *pmctl,
 	struct vfe32_ctrl_type *vfe32_ctrl)
 {
-	msm_camio_bus_scale_cfg(
-		pmctl->sdata->pdata->cam_bus_scale_table, S_PREVIEW);
 	vfe32_start_common(vfe32_ctrl);
 	return 0;
 }
@@ -1478,53 +1461,53 @@
 		b = &outch->free_buf;
 	return b;
 }
-static int vfe32_configure_pingpong_buffers(
-	int id, int path, struct vfe32_ctrl_type *vfe32_ctrl)
+static int configure_pingpong_buffers(
+	int id, int path, struct axi_ctrl_t *axi_ctrl)
 {
 	struct vfe32_output_ch *outch = NULL;
 	int rc = 0;
 	uint32_t inst_handle = 0;
 	if (path == VFE_MSG_OUTPUT_PRIMARY)
-		inst_handle = vfe32_ctrl->share_ctrl->outpath.out0.inst_handle;
+		inst_handle = axi_ctrl->share_ctrl->outpath.out0.inst_handle;
 	else if (path == VFE_MSG_OUTPUT_SECONDARY)
-		inst_handle = vfe32_ctrl->share_ctrl->outpath.out1.inst_handle;
+		inst_handle = axi_ctrl->share_ctrl->outpath.out1.inst_handle;
 	else if (path == VFE_MSG_OUTPUT_TERTIARY1)
-		inst_handle = vfe32_ctrl->share_ctrl->outpath.out2.inst_handle;
+		inst_handle = axi_ctrl->share_ctrl->outpath.out2.inst_handle;
 	else if (path == VFE_MSG_OUTPUT_TERTIARY2)
-		inst_handle = vfe32_ctrl->share_ctrl->outpath.out3.inst_handle;
+		inst_handle = axi_ctrl->share_ctrl->outpath.out3.inst_handle;
 
 	vfe32_subdev_notify(id, path, inst_handle,
-		&vfe32_ctrl->subdev, vfe32_ctrl->share_ctrl);
-	outch = vfe32_get_ch(path, vfe32_ctrl->share_ctrl);
+		&axi_ctrl->subdev, axi_ctrl->share_ctrl);
+	outch = vfe32_get_ch(path, axi_ctrl->share_ctrl);
 	if (outch->ping.ch_paddr[0] && outch->pong.ch_paddr[0]) {
 		/* Configure Preview Ping Pong */
 		pr_info("%s Configure ping/pong address for %d",
 						__func__, path);
 		vfe32_put_ch_ping_addr(
-			vfe32_ctrl->share_ctrl->vfebase, outch->ch0,
+			axi_ctrl->share_ctrl->vfebase, outch->ch0,
 			outch->ping.ch_paddr[0]);
 		vfe32_put_ch_pong_addr(
-			vfe32_ctrl->share_ctrl->vfebase, outch->ch0,
+			axi_ctrl->share_ctrl->vfebase, outch->ch0,
 			outch->pong.ch_paddr[0]);
 
-		if ((vfe32_ctrl->share_ctrl->current_mode !=
+		if ((axi_ctrl->share_ctrl->current_mode !=
 			VFE_OUTPUTS_RAW) && (path != VFE_MSG_OUTPUT_TERTIARY1)
 			&& (path != VFE_MSG_OUTPUT_TERTIARY2)) {
 			vfe32_put_ch_ping_addr(
-				vfe32_ctrl->share_ctrl->vfebase, outch->ch1,
+				axi_ctrl->share_ctrl->vfebase, outch->ch1,
 				outch->ping.ch_paddr[1]);
 			vfe32_put_ch_pong_addr(
-				vfe32_ctrl->share_ctrl->vfebase, outch->ch1,
+				axi_ctrl->share_ctrl->vfebase, outch->ch1,
 				outch->pong.ch_paddr[1]);
 		}
 
 		if (outch->ping.num_planes > 2)
 			vfe32_put_ch_ping_addr(
-				vfe32_ctrl->share_ctrl->vfebase, outch->ch2,
+				axi_ctrl->share_ctrl->vfebase, outch->ch2,
 				outch->ping.ch_paddr[2]);
 		if (outch->pong.num_planes > 2)
 			vfe32_put_ch_pong_addr(
-				vfe32_ctrl->share_ctrl->vfebase, outch->ch2,
+				axi_ctrl->share_ctrl->vfebase, outch->ch2,
 				outch->pong.ch_paddr[2]);
 
 		/* avoid stale info */
@@ -1579,7 +1562,6 @@
 	uint32_t *cmdp_local = NULL;
 	uint32_t snapshot_cnt = 0;
 	uint32_t temp1 = 0, temp2 = 0;
-	uint16_t vfe_mode = 0;
 	struct msm_camera_vfe_params_t vfe_params;
 
 	CDBG("vfe32_proc_general: cmdID = %s, length = %d\n",
@@ -1602,42 +1584,7 @@
 
 		vfe32_ctrl->share_ctrl->current_mode =
 			vfe_params.operation_mode;
-		vfe_mode = vfe32_ctrl->share_ctrl->current_mode
-			& ~(VFE_OUTPUTS_RDI0|VFE_OUTPUTS_RDI1);
-		if (vfe_mode) {
-			if ((vfe32_ctrl->share_ctrl->current_mode &
-				VFE_OUTPUTS_PREVIEW_AND_VIDEO) ||
-				(vfe32_ctrl->share_ctrl->current_mode &
-				VFE_OUTPUTS_PREVIEW))
-				/* Configure primary channel */
-				rc = vfe32_configure_pingpong_buffers(
-					VFE_MSG_START,
-					VFE_MSG_OUTPUT_PRIMARY,
-					vfe32_ctrl);
-			else
-			/* Configure secondary channel */
-				rc = vfe32_configure_pingpong_buffers(
-					VFE_MSG_START,
-					VFE_MSG_OUTPUT_SECONDARY,
-					vfe32_ctrl);
-		}
-		if (vfe32_ctrl->share_ctrl->current_mode &
-				VFE_OUTPUTS_RDI0)
-			rc = vfe32_configure_pingpong_buffers(
-				VFE_MSG_START, VFE_MSG_OUTPUT_TERTIARY1,
-				vfe32_ctrl);
-		if (vfe32_ctrl->share_ctrl->current_mode &
-				VFE_OUTPUTS_RDI1)
-			rc = vfe32_configure_pingpong_buffers(
-				VFE_MSG_START, VFE_MSG_OUTPUT_TERTIARY2,
-				vfe32_ctrl);
 
-		if (rc < 0) {
-			pr_err("%s error configuring pingpong buffers"
-				   " for preview", __func__);
-			rc = -EINVAL;
-			goto proc_general_done;
-		}
 		rc = vfe32_start(pmctl, vfe32_ctrl);
 		break;
 	case VFE_CMD_UPDATE:
@@ -1655,15 +1602,6 @@
 		snapshot_cnt = vfe_params.capture_count;
 		vfe32_ctrl->share_ctrl->current_mode =
 			vfe_params.operation_mode;
-		rc = vfe32_configure_pingpong_buffers(
-			VFE_MSG_CAPTURE, VFE_MSG_OUTPUT_PRIMARY,
-			vfe32_ctrl);
-		if (rc < 0) {
-			pr_err("%s error configuring pingpong buffers"
-				   " for snapshot", __func__);
-			rc = -EINVAL;
-			goto proc_general_done;
-		}
 		rc = vfe32_capture_raw(pmctl, vfe32_ctrl, snapshot_cnt);
 		break;
 	case VFE_CMD_CAPTURE:
@@ -1677,78 +1615,12 @@
 		snapshot_cnt = vfe_params.capture_count;
 		vfe32_ctrl->share_ctrl->current_mode =
 			vfe_params.operation_mode;
-		if (vfe32_ctrl->share_ctrl->current_mode ==
-			VFE_OUTPUTS_JPEG_AND_THUMB ||
-		vfe32_ctrl->share_ctrl->current_mode ==
-			VFE_OUTPUTS_THUMB_AND_JPEG) {
-			if (snapshot_cnt != 1) {
-				pr_err("only support 1 inline snapshot\n");
-				rc = -EINVAL;
-				goto proc_general_done;
-			}
-			/* Configure primary channel for JPEG */
-			rc = vfe32_configure_pingpong_buffers(
-				VFE_MSG_JPEG_CAPTURE,
-				VFE_MSG_OUTPUT_PRIMARY,
-				vfe32_ctrl);
-		} else {
-			/* Configure primary channel */
-			rc = vfe32_configure_pingpong_buffers(
-				VFE_MSG_CAPTURE,
-				VFE_MSG_OUTPUT_PRIMARY,
-				vfe32_ctrl);
-		}
-		if (rc < 0) {
-			pr_err("%s error configuring pingpong buffers"
-				   " for primary output", __func__);
-			rc = -EINVAL;
-			goto proc_general_done;
-		}
-		/* Configure secondary channel */
-		rc = vfe32_configure_pingpong_buffers(
-				VFE_MSG_CAPTURE, VFE_MSG_OUTPUT_SECONDARY,
-				vfe32_ctrl);
-		if (rc < 0) {
-			pr_err("%s error configuring pingpong buffers"
-				   " for secondary output", __func__);
-			rc = -EINVAL;
-			goto proc_general_done;
-		}
+
 		rc = vfe32_capture(pmctl, snapshot_cnt, vfe32_ctrl);
 		break;
 	case VFE_CMD_START_RECORDING:
 		pr_info("vfe32_proc_general: cmdID = %s\n",
 			vfe32_general_cmd[cmd->id]);
-		if (copy_from_user(&temp1, (void __user *)(cmd->value),
-				sizeof(uint32_t))) {
-			pr_err("%s Error copying inst_handle for recording\n",
-				__func__);
-			rc = -EFAULT;
-			goto proc_general_done;
-		}
-		if (vfe32_ctrl->share_ctrl->current_mode &
-			VFE_OUTPUTS_PREVIEW_AND_VIDEO) {
-			vfe32_ctrl->share_ctrl->outpath.out1.inst_handle =
-				temp1;
-			rc = vfe32_configure_pingpong_buffers(
-				VFE_MSG_START_RECORDING,
-				VFE_MSG_OUTPUT_SECONDARY,
-				vfe32_ctrl);
-		} else if (vfe32_ctrl->share_ctrl->current_mode &
-			VFE_OUTPUTS_VIDEO_AND_PREVIEW) {
-			vfe32_ctrl->share_ctrl->outpath.out0.inst_handle =
-				temp1;
-			rc = vfe32_configure_pingpong_buffers(
-				VFE_MSG_START_RECORDING,
-				VFE_MSG_OUTPUT_PRIMARY,
-				vfe32_ctrl);
-		}
-		if (rc < 0) {
-			pr_err("%s error configuring pingpong buffers"
-				" for video", __func__);
-			rc = -EINVAL;
-			goto proc_general_done;
-		}
 		rc = vfe32_start_recording(pmctl, vfe32_ctrl);
 		break;
 	case VFE_CMD_STOP_RECORDING:
@@ -2290,23 +2162,7 @@
 		break;
 
 	case VFE_CMD_LIVESHOT:
-		if (copy_from_user(&temp1, (void __user *)(cmd->value),
-				sizeof(uint32_t))) {
-			pr_err("%s Error copying inst_handle for liveshot ",
-				__func__);
-			rc = -EFAULT;
-			goto proc_general_done;
-		}
-		vfe32_ctrl->share_ctrl->outpath.out0.inst_handle = temp1;
 		/* Configure primary channel */
-		rc = vfe32_configure_pingpong_buffers(VFE_MSG_CAPTURE,
-					VFE_MSG_OUTPUT_PRIMARY, vfe32_ctrl);
-		if (rc < 0) {
-			pr_err("%s error configuring pingpong buffers"
-				   " for primary output", __func__);
-			rc = -EINVAL;
-			goto proc_general_done;
-		}
 		vfe32_start_liveshot(pmctl, vfe32_ctrl);
 		break;
 
@@ -2848,14 +2704,6 @@
 
 		vfe32_ctrl->share_ctrl->current_mode =
 			vfe_params.operation_mode;
-		rc = vfe32_configure_pingpong_buffers(VFE_MSG_START,
-			VFE_MSG_OUTPUT_PRIMARY, vfe32_ctrl);
-		if (rc < 0)
-			goto proc_general_done;
-		rc = vfe32_configure_pingpong_buffers(VFE_MSG_START,
-			VFE_MSG_OUTPUT_SECONDARY, vfe32_ctrl);
-		if (rc < 0)
-			goto proc_general_done;
 
 		rc = vfe32_zsl(pmctl, vfe32_ctrl);
 		break;
@@ -5171,9 +5019,185 @@
 		axi_ctrl->share_ctrl->vfebase + VFE_GLOBAL_RESET);
 }
 
-void axi_start(struct axi_ctrl_t *axi_ctrl, uint16_t cmd_type)
+int axi_config_buffers(struct axi_ctrl_t *axi_ctrl,
+	struct msm_camera_vfe_params_t vfe_params)
+{
+	uint16_t vfe_mode = axi_ctrl->share_ctrl->current_mode
+			& ~(VFE_OUTPUTS_RDI0|VFE_OUTPUTS_RDI1);
+	int rc = 0;
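+	/* Pick the ping/pong buffer configuration matching the AXI command */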
+	switch (vfe_params.cmd_type) {
+	case AXI_CMD_PREVIEW:
+		if (vfe_mode) {
+			if ((axi_ctrl->share_ctrl->current_mode &
+				VFE_OUTPUTS_PREVIEW_AND_VIDEO) ||
+				(axi_ctrl->share_ctrl->current_mode &
+				VFE_OUTPUTS_PREVIEW))
+				/* Configure primary channel */
+				rc = configure_pingpong_buffers(
+					VFE_MSG_START,
+					VFE_MSG_OUTPUT_PRIMARY,
+					axi_ctrl);
+			else
+			/* Configure secondary channel */
+				rc = configure_pingpong_buffers(
+					VFE_MSG_START,
+					VFE_MSG_OUTPUT_SECONDARY,
+					axi_ctrl);
+		}
+		if (axi_ctrl->share_ctrl->current_mode &
+				VFE_OUTPUTS_RDI0)
+			rc = configure_pingpong_buffers(
+				VFE_MSG_START, VFE_MSG_OUTPUT_TERTIARY1,
+				axi_ctrl);
+		if (axi_ctrl->share_ctrl->current_mode &
+				VFE_OUTPUTS_RDI1)
+			rc = configure_pingpong_buffers(
+				VFE_MSG_START, VFE_MSG_OUTPUT_TERTIARY2,
+				axi_ctrl);
+
+		if (rc < 0) {
+			pr_err("%s error configuring pingpong buffers for preview",
+				__func__);
+			rc = -EINVAL;
+			goto config_done;
+		}
+		break;
+	case AXI_CMD_RAW_CAPTURE:
+		rc = configure_pingpong_buffers(
+			VFE_MSG_CAPTURE, VFE_MSG_OUTPUT_PRIMARY,
+			axi_ctrl);
+		if (rc < 0) {
+			pr_err("%s error configuring pingpong buffers for snapshot",
+				__func__);
+			rc = -EINVAL;
+			goto config_done;
+		}
+		break;
+	case AXI_CMD_ZSL:
+		rc = configure_pingpong_buffers(VFE_MSG_START,
+			VFE_MSG_OUTPUT_PRIMARY, axi_ctrl);
+		if (rc < 0)
+			goto config_done;
+		rc = configure_pingpong_buffers(VFE_MSG_START,
+			VFE_MSG_OUTPUT_SECONDARY, axi_ctrl);
+		if (rc < 0)
+			goto config_done;
+		break;
+	case AXI_CMD_RECORD:
+		if (axi_ctrl->share_ctrl->current_mode &
+			VFE_OUTPUTS_PREVIEW_AND_VIDEO) {
+			axi_ctrl->share_ctrl->outpath.out1.inst_handle =
+				vfe_params.inst_handle;
+			rc = configure_pingpong_buffers(
+				VFE_MSG_START_RECORDING,
+				VFE_MSG_OUTPUT_SECONDARY,
+				axi_ctrl);
+		} else if (axi_ctrl->share_ctrl->current_mode &
+			VFE_OUTPUTS_VIDEO_AND_PREVIEW) {
+			axi_ctrl->share_ctrl->outpath.out0.inst_handle =
+				vfe_params.inst_handle;
+			rc = configure_pingpong_buffers(
+				VFE_MSG_START_RECORDING,
+				VFE_MSG_OUTPUT_PRIMARY,
+				axi_ctrl);
+		}
+		if (rc < 0) {
+			pr_err("%s error configuring pingpong buffers for video",
+				__func__);
+			rc = -EINVAL;
+			goto config_done;
+		}
+		break;
+	case AXI_CMD_LIVESHOT:
+		axi_ctrl->share_ctrl->outpath.out0.inst_handle =
+			vfe_params.inst_handle;
+		rc = configure_pingpong_buffers(VFE_MSG_CAPTURE,
+					VFE_MSG_OUTPUT_PRIMARY, axi_ctrl);
+		if (rc < 0) {
+			pr_err("%s error configuring pingpong buffers for primary output",
+				__func__);
+			rc = -EINVAL;
+			goto config_done;
+		}
+		break;
+	case AXI_CMD_CAPTURE:
+		if (axi_ctrl->share_ctrl->current_mode ==
+			VFE_OUTPUTS_JPEG_AND_THUMB ||
+		axi_ctrl->share_ctrl->current_mode ==
+			VFE_OUTPUTS_THUMB_AND_JPEG) {
+
+			/* Configure primary channel for JPEG */
+			rc = configure_pingpong_buffers(
+				VFE_MSG_JPEG_CAPTURE,
+				VFE_MSG_OUTPUT_PRIMARY,
+				axi_ctrl);
+		} else {
+			/* Configure primary channel */
+			rc = configure_pingpong_buffers(
+				VFE_MSG_CAPTURE,
+				VFE_MSG_OUTPUT_PRIMARY,
+				axi_ctrl);
+		}
+		if (rc < 0) {
+			pr_err("%s error configuring pingpong buffers for primary output",
+				__func__);
+			rc = -EINVAL;
+			goto config_done;
+		}
+		/* Configure secondary channel */
+		rc = configure_pingpong_buffers(
+				VFE_MSG_CAPTURE, VFE_MSG_OUTPUT_SECONDARY,
+				axi_ctrl);
+		if (rc < 0) {
+			pr_err("%s error configuring pingpong buffers for secondary output",
+				__func__);
+			rc = -EINVAL;
+			goto config_done;
+		}
+		break;
+	default:
+		rc = -EINVAL;
+		break;
+
+	}
+config_done:
+	return rc;
+}
+
+void axi_start(struct msm_cam_media_controller *pmctl,
+	struct axi_ctrl_t *axi_ctrl, struct msm_camera_vfe_params_t vfe_params)
 {
 	uint32_t irq_comp_mask = 0, irq_mask = 0;
+	int rc = 0;
+	rc = axi_config_buffers(axi_ctrl, vfe_params);
+	if (rc < 0)
+		return;
+
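+	/* Re-vote camera bus bandwidth to match the requested use case */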
+	switch (vfe_params.cmd_type) {
+	case AXI_CMD_PREVIEW:
+		msm_camio_bus_scale_cfg(
+		pmctl->sdata->pdata->cam_bus_scale_table, S_PREVIEW);
+		break;
+	case AXI_CMD_CAPTURE:
+	case AXI_CMD_RAW_CAPTURE:
+		msm_camio_bus_scale_cfg(
+		pmctl->sdata->pdata->cam_bus_scale_table, S_CAPTURE);
+		break;
+	case AXI_CMD_RECORD:
+		msm_camio_bus_scale_cfg(
+		pmctl->sdata->pdata->cam_bus_scale_table, S_VIDEO);
+		return;
+	case AXI_CMD_ZSL:
+		msm_camio_bus_scale_cfg(
+		pmctl->sdata->pdata->cam_bus_scale_table, S_ZSL);
+		break;
+	case AXI_CMD_LIVESHOT:
+		msm_camio_bus_scale_cfg(
+		pmctl->sdata->pdata->cam_bus_scale_table, S_LIVESHOT);
+		return;
+	default:
+		return;
+	}
 
 	irq_comp_mask =
 		msm_camera_io_r(axi_ctrl->share_ctrl->vfebase +
@@ -5225,7 +5249,7 @@
 	msm_camera_io_w(irq_comp_mask,
 		axi_ctrl->share_ctrl->vfebase + VFE_IRQ_COMP_MASK);
 
-	switch (cmd_type) {
+	switch (vfe_params.cmd_type) {
 	case AXI_CMD_PREVIEW: {
 		uint16_t operation_mode =
 		(axi_ctrl->share_ctrl->operation_mode &
@@ -5335,7 +5359,8 @@
 	atomic_set(&axi_ctrl->share_ctrl->handle_axi_irq, 1);
 }
 
-void axi_stop(struct axi_ctrl_t *axi_ctrl, uint16_t cmd_type)
+void axi_stop(struct msm_cam_media_controller *pmctl,
+	struct axi_ctrl_t *axi_ctrl, struct msm_camera_vfe_params_t vfe_params)
 {
 	uint32_t reg_update = 0;
 	unsigned long flags;
@@ -5343,6 +5368,24 @@
 	axi_ctrl->share_ctrl->current_mode & ~(VFE_OUTPUTS_RDI0|
 		VFE_OUTPUTS_RDI1);
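+	/*
+	 * Preview, capture and ZSL proceed to the full AXI stop below; for
+	 * record and liveshot stop only the bus bandwidth vote is lowered.
+	 */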
 
+	switch (vfe_params.cmd_type) {
+	case AXI_CMD_PREVIEW:
+	case AXI_CMD_CAPTURE:
+	case AXI_CMD_RAW_CAPTURE:
+	case AXI_CMD_ZSL:
+		break;
+	case AXI_CMD_RECORD:
+		msm_camio_bus_scale_cfg(
+		pmctl->sdata->pdata->cam_bus_scale_table, S_PREVIEW);
+		return;
+	case AXI_CMD_LIVESHOT:
+		msm_camio_bus_scale_cfg(
+		pmctl->sdata->pdata->cam_bus_scale_table, S_VIDEO);
+		return;
+	default:
+		return;
+	}
+
 	if (!axi_ctrl->share_ctrl->skip_abort) {
 		atomic_set(&axi_ctrl->share_ctrl->handle_axi_irq, 0);
 		axi_disable_irq(axi_ctrl);
@@ -5351,7 +5394,7 @@
 	spin_lock_irqsave(&axi_ctrl->share_ctrl->stop_flag_lock, flags);
 	axi_ctrl->share_ctrl->stop_ack_pending  = TRUE;
 	spin_unlock_irqrestore(&axi_ctrl->share_ctrl->stop_flag_lock, flags);
-	switch (cmd_type) {
+	switch (vfe_params.cmd_type) {
 	case AXI_CMD_PREVIEW: {
 		switch (operation_mode) {
 		case VFE_OUTPUTS_PREVIEW:
@@ -5474,6 +5517,8 @@
 	struct msm_vfe_cfg_cmd cfgcmd;
 	struct msm_isp_cmd vfecmd;
 	struct axi_ctrl_t *axi_ctrl = v4l2_get_subdevdata(sd);
+	struct msm_cam_media_controller *pmctl =
+		(struct msm_cam_media_controller *)v4l2_get_subdev_hostdata(sd);
 	int rc = 0, vfe_cmd_type = 0, rdi_mode = 0;
 	unsigned long flags;
 
@@ -5676,7 +5721,7 @@
 			vfe_params.skip_abort;
 		spin_unlock_irqrestore(&axi_ctrl->share_ctrl->abort_lock,
 			flags);
-		axi_start(axi_ctrl, vfe_params.cmd_type);
+		axi_start(pmctl, axi_ctrl, vfe_params);
 		}
 		break;
 	case CMD_AXI_STOP: {
@@ -5693,7 +5738,7 @@
 			vfe_params.skip_abort;
 		spin_unlock_irqrestore(&axi_ctrl->share_ctrl->abort_lock,
 			flags);
-		axi_stop(axi_ctrl, vfe_params.cmd_type);
+		axi_stop(pmctl, axi_ctrl, vfe_params);
 		}
 		break;
 	case CMD_AXI_RESET:
diff --git a/drivers/media/video/msm/vfe/msm_vfe40.c b/drivers/media/video/msm/vfe/msm_vfe40.c
new file mode 100644
index 0000000..5a1d488
--- /dev/null
+++ b/drivers/media/video/msm/vfe/msm_vfe40.c
@@ -0,0 +1,3699 @@
+/* Copyright (c) 2012, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/uaccess.h>
+#include <linux/interrupt.h>
+#include <linux/slab.h>
+#include <linux/io.h>
+#include <linux/atomic.h>
+#include <linux/regulator/consumer.h>
+#include <linux/clk.h>
+#include <linux/of.h>
+#include <mach/irqs.h>
+#include <mach/camera.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-subdev.h>
+#include <media/msm_isp.h>
+
+#include "msm.h"
+#include "msm_vfe40.h"
+
+struct vfe40_isr_queue_cmd {
+	struct list_head list;
+	uint32_t                           vfeInterruptStatus0;
+	uint32_t                           vfeInterruptStatus1;
+};
+
+static const char * const vfe40_general_cmd[] = {
+	"DUMMY_0",  /* 0 */
+	"SET_CLK",
+	"RESET",
+	"START",
+	"TEST_GEN_START",
+	"OPERATION_CFG",  /* 5 */
+	"AXI_OUT_CFG",
+	"CAMIF_CFG",
+	"AXI_INPUT_CFG",
+	"BLACK_LEVEL_CFG",
+	"ROLL_OFF_CFG",  /* 10 */
+	"DEMUX_CFG",
+	"FOV_CFG",
+	"MAIN_SCALER_CFG",
+	"WB_CFG",
+	"COLOR_COR_CFG", /* 15 */
+	"RGB_G_CFG",
+	"LA_CFG",
+	"CHROMA_EN_CFG",
+	"CHROMA_SUP_CFG",
+	"MCE_CFG", /* 20 */
+	"SK_ENHAN_CFG",
+	"ASF_CFG",
+	"S2Y_CFG",
+	"S2CbCr_CFG",
+	"CHROMA_SUBS_CFG",  /* 25 */
+	"OUT_CLAMP_CFG",
+	"FRAME_SKIP_CFG",
+	"DUMMY_1",
+	"DUMMY_2",
+	"DUMMY_3",  /* 30 */
+	"UPDATE",
+	"BL_LVL_UPDATE",
+	"DEMUX_UPDATE",
+	"FOV_UPDATE",
+	"MAIN_SCALER_UPDATE",  /* 35 */
+	"WB_UPDATE",
+	"COLOR_COR_UPDATE",
+	"RGB_G_UPDATE",
+	"LA_UPDATE",
+	"CHROMA_EN_UPDATE",  /* 40 */
+	"CHROMA_SUP_UPDATE",
+	"MCE_UPDATE",
+	"SK_ENHAN_UPDATE",
+	"S2CbCr_UPDATE",
+	"S2Y_UPDATE",  /* 45 */
+	"ASF_UPDATE",
+	"FRAME_SKIP_UPDATE",
+	"CAMIF_FRAME_UPDATE",
+	"STATS_AF_UPDATE",
+	"STATS_AE_UPDATE",  /* 50 */
+	"STATS_AWB_UPDATE",
+	"STATS_RS_UPDATE",
+	"STATS_CS_UPDATE",
+	"STATS_SKIN_UPDATE",
+	"STATS_IHIST_UPDATE",  /* 55 */
+	"DUMMY_4",
+	"EPOCH1_ACK",
+	"EPOCH2_ACK",
+	"START_RECORDING",
+	"STOP_RECORDING",  /* 60 */
+	"DUMMY_5",
+	"DUMMY_6",
+	"CAPTURE",
+	"DUMMY_7",
+	"STOP",  /* 65 */
+	"GET_HW_VERSION",
+	"GET_FRAME_SKIP_COUNTS",
+	"OUTPUT1_BUFFER_ENQ",
+	"OUTPUT2_BUFFER_ENQ",
+	"OUTPUT3_BUFFER_ENQ",  /* 70 */
+	"JPEG_OUT_BUF_ENQ",
+	"RAW_OUT_BUF_ENQ",
+	"RAW_IN_BUF_ENQ",
+	"STATS_AF_ENQ",
+	"STATS_AE_ENQ",  /* 75 */
+	"STATS_AWB_ENQ",
+	"STATS_RS_ENQ",
+	"STATS_CS_ENQ",
+	"STATS_SKIN_ENQ",
+	"STATS_IHIST_ENQ",  /* 80 */
+	"DUMMY_8",
+	"JPEG_ENC_CFG",
+	"DUMMY_9",
+	"STATS_AF_START",
+	"STATS_AF_STOP",  /* 85 */
+	"STATS_AE_START",
+	"STATS_AE_STOP",
+	"STATS_AWB_START",
+	"STATS_AWB_STOP",
+	"STATS_RS_START",  /* 90 */
+	"STATS_RS_STOP",
+	"STATS_CS_START",
+	"STATS_CS_STOP",
+	"STATS_SKIN_START",
+	"STATS_SKIN_STOP",  /* 95 */
+	"STATS_IHIST_START",
+	"STATS_IHIST_STOP",
+	"DUMMY_10",
+	"SYNC_TIMER_SETTING",
+	"ASYNC_TIMER_SETTING",  /* 100 */
+	"LIVESHOT",
+	"LA_SETUP",
+	"LINEARIZATION_CFG",
+	"DEMOSAICV3",
+	"DEMOSAICV3_ABCC_CFG", /* 105 */
+	"DEMOSAICV3_DBCC_CFG",
+	"DEMOSAICV3_DBPC_CFG",
+	"DEMOSAICV3_ABF_CFG",
+	"DEMOSAICV3_ABCC_UPDATE",
+	"DEMOSAICV3_DBCC_UPDATE", /* 110 */
+	"DEMOSAICV3_DBPC_UPDATE",
+	"XBAR_CFG",
+	"EZTUNE_CFG",
+	"V40_ZSL",
+	"LINEARIZATION_UPDATE", /*115*/
+	"DEMOSAICV3_ABF_UPDATE",
+	"CLF_CFG",
+	"CLF_LUMA_UPDATE",
+	"CLF_CHROMA_UPDATE",
+	"PCA_ROLL_OFF_CFG", /*120*/
+	"PCA_ROLL_OFF_UPDATE",
+	"GET_REG_DUMP",
+	"GET_LINEARIZATON_TABLE",
+	"GET_MESH_ROLLOFF_TABLE",
+	"GET_PCA_ROLLOFF_TABLE", /*125*/
+	"GET_RGB_G_TABLE",
+	"GET_LA_TABLE",
+	"DEMOSAICV3_UPDATE",
+	"ACTIVE_REGION_CONFIG",
+	"COLOR_PROCESSING_CONFIG", /*130*/
+	"STATS_WB_AEC_CONFIG",
+	"STATS_WB_AEC_UPDATE",
+	"Y_GAMMA_CONFIG",
+	"SCALE_OUTPUT1_CONFIG",
+	"SCALE_OUTPUT2_CONFIG", /*135*/
+	"CAPTURE_RAW",
+	"STOP_LIVESHOT",
+	"RECONFIG_VFE",
+	"STATS_REQBUF_CFG",
+	"STATS_ENQUEUEBUF_CFG",/*140*/
+	"STATS_FLUSH_BUFQ_CFG",
+	"FOV_ENC_CFG",
+	"FOV_VIEW_CFG",
+	"FOV_ENC_UPDATE",
+	"FOV_VIEW_UPDATE",/*145*/
+	"SCALER_ENC_CFG",
+	"SCALER_VIEW_CFG",
+	"SCALER_ENC_UPDATE",
+	"SCALER_VIEW_UPDATE",
+	"COLORXFORM_ENC_CFG",/*150*/
+	"COLORXFORM_VIEW_CFG",
+	"COLORXFORM_ENC_UPDATE",
+	"COLORXFORM_VIEW_UPDATE",
+};
+
+static void vfe40_stop(struct vfe40_ctrl_type *vfe40_ctrl)
+{
+	unsigned long flags;
+
+	atomic_set(&vfe40_ctrl->share_ctrl->vstate, 0);
+
+	/* Reset the hw modules and send a msg when reset_irq arrives. */
+	spin_lock_irqsave(&vfe40_ctrl->share_ctrl->stop_flag_lock, flags);
+	vfe40_ctrl->share_ctrl->stop_ack_pending = TRUE;
+	spin_unlock_irqrestore(&vfe40_ctrl->share_ctrl->stop_flag_lock, flags);
+
+	/* disable all interrupts.  */
+	msm_camera_io_w(VFE_DISABLE_ALL_IRQS,
+		vfe40_ctrl->share_ctrl->vfebase + VFE_IRQ_MASK_0);
+	msm_camera_io_w(VFE_DISABLE_ALL_IRQS,
+			vfe40_ctrl->share_ctrl->vfebase + VFE_IRQ_MASK_1);
+
+	/* clear all pending interrupts*/
+	msm_camera_io_w(VFE_CLEAR_ALL_IRQ0,
+		vfe40_ctrl->share_ctrl->vfebase + VFE_IRQ_CLEAR_0);
+	msm_camera_io_w(VFE_CLEAR_ALL_IRQ1,
+		vfe40_ctrl->share_ctrl->vfebase + VFE_IRQ_CLEAR_1);
+	/* Ensure the write order while writing
+	to the command register using the barrier */
+	msm_camera_io_w_mb(1,
+		vfe40_ctrl->share_ctrl->vfebase + VFE_IRQ_CMD);
+
+	/* In either continuous or snapshot mode the stop command can be
+	 * issued at any time; stop CAMIF immediately. */
+	msm_camera_io_w(CAMIF_COMMAND_STOP_IMMEDIATELY,
+		vfe40_ctrl->share_ctrl->vfebase + VFE_CAMIF_COMMAND);
+}
+
+void vfe40_subdev_notify(int id, int path, int image_mode,
+	struct v4l2_subdev *sd, struct vfe_share_ctrl_t *share_ctrl)
+{
+	struct msm_vfe_resp rp;
+	struct msm_frame_info frame_info;
+	unsigned long flags = 0;
+	spin_lock_irqsave(&share_ctrl->sd_notify_lock, flags);
+	CDBG("%s: msgId = %d\n", __func__, id);
+	memset(&rp, 0, sizeof(struct msm_vfe_resp));
+	rp.evt_msg.type   = MSM_CAMERA_MSG;
+	frame_info.image_mode = image_mode;
+	frame_info.path = path;
+	rp.evt_msg.data = &frame_info;
+	rp.type	   = id;
+	v4l2_subdev_notify(sd, NOTIFY_VFE_BUF_EVT, &rp);
+	spin_unlock_irqrestore(&share_ctrl->sd_notify_lock, flags);
+}
+
+static void vfe40_reset_internal_variables(
+	struct vfe40_ctrl_type *vfe40_ctrl)
+{
+	unsigned long flags;
+	vfe40_ctrl->vfeImaskCompositePacked = 0;
+	/* state control variables */
+	vfe40_ctrl->start_ack_pending = FALSE;
+	atomic_set(&vfe40_ctrl->share_ctrl->irq_cnt, 0);
+
+	spin_lock_irqsave(&vfe40_ctrl->share_ctrl->stop_flag_lock, flags);
+	vfe40_ctrl->share_ctrl->stop_ack_pending  = FALSE;
+	spin_unlock_irqrestore(&vfe40_ctrl->share_ctrl->stop_flag_lock, flags);
+
+	vfe40_ctrl->reset_ack_pending  = FALSE;
+
+	spin_lock_irqsave(&vfe40_ctrl->update_ack_lock, flags);
+	vfe40_ctrl->update_ack_pending = FALSE;
+	spin_unlock_irqrestore(&vfe40_ctrl->update_ack_lock, flags);
+
+	vfe40_ctrl->recording_state = VFE_STATE_IDLE;
+	vfe40_ctrl->share_ctrl->liveshot_state = VFE_STATE_IDLE;
+
+	atomic_set(&vfe40_ctrl->share_ctrl->vstate, 0);
+
+	/* 0 for continuous mode, 1 for snapshot mode */
+	vfe40_ctrl->share_ctrl->operation_mode = 0;
+	vfe40_ctrl->share_ctrl->outpath.output_mode = 0;
+	vfe40_ctrl->share_ctrl->vfe_capture_count = 0;
+
+	/* this is unsigned 32 bit integer. */
+	vfe40_ctrl->share_ctrl->vfeFrameId = 0;
+	/* Stats control variables. */
+	memset(&(vfe40_ctrl->afStatsControl), 0,
+		sizeof(struct vfe_stats_control));
+
+	memset(&(vfe40_ctrl->awbStatsControl), 0,
+		sizeof(struct vfe_stats_control));
+
+	memset(&(vfe40_ctrl->aecStatsControl), 0,
+		sizeof(struct vfe_stats_control));
+
+	memset(&(vfe40_ctrl->ihistStatsControl), 0,
+		sizeof(struct vfe_stats_control));
+
+	memset(&(vfe40_ctrl->rsStatsControl), 0,
+		sizeof(struct vfe_stats_control));
+
+	memset(&(vfe40_ctrl->csStatsControl), 0,
+		sizeof(struct vfe_stats_control));
+
+	vfe40_ctrl->frame_skip_cnt = 31;
+	vfe40_ctrl->frame_skip_pattern = 0xffffffff;
+	vfe40_ctrl->snapshot_frame_cnt = 0;
+}
+
+static void vfe40_reset(struct vfe40_ctrl_type *vfe40_ctrl)
+{
+	vfe40_reset_internal_variables(vfe40_ctrl);
+	/* disable all interrupts.  vfeImaskLocal is also reset to 0
+	* to begin with. */
+	msm_camera_io_w(VFE_DISABLE_ALL_IRQS,
+		vfe40_ctrl->share_ctrl->vfebase + VFE_IRQ_MASK_0);
+
+	msm_camera_io_w(VFE_DISABLE_ALL_IRQS,
+		vfe40_ctrl->share_ctrl->vfebase + VFE_IRQ_MASK_1);
+
+	/* clear all pending interrupts*/
+	msm_camera_io_w(VFE_CLEAR_ALL_IRQS,
+		vfe40_ctrl->share_ctrl->vfebase + VFE_IRQ_CLEAR_0);
+	msm_camera_io_w(VFE_CLEAR_ALL_IRQS,
+		vfe40_ctrl->share_ctrl->vfebase + VFE_IRQ_CLEAR_1);
+
+	/* Ensure the write order while writing
+	to the command register using the barrier */
+	msm_camera_io_w_mb(1, vfe40_ctrl->share_ctrl->vfebase + VFE_IRQ_CMD);
+
+	/* enable reset_ack interrupt.  */
+	msm_camera_io_w(VFE_IMASK_WHILE_STOPPING_1,
+	vfe40_ctrl->share_ctrl->vfebase + VFE_IRQ_MASK_1);
+
+	/* Write to VFE_GLOBAL_RESET_CMD to reset the vfe hardware. Once reset
+	 * is done, a hardware interrupt is generated.  The VFE IST processes
+	 * the interrupt to complete the function call.  Note that the reset
+	 * function is synchronous. */
+
+	/* Ensure the write order while writing
+	to the command register using the barrier */
+	msm_camera_io_w_mb(VFE_RESET_UPON_RESET_CMD,
+		vfe40_ctrl->share_ctrl->vfebase + VFE_GLOBAL_RESET);
+
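+	/* Program the VFE bus bridge QoS configuration registers */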
+	msm_camera_io_w(0xAAAAAAAA,
+	vfe40_ctrl->share_ctrl->vfebase + VFE_0_BUS_BDG_QOS_CFG_0);
+	msm_camera_io_w(0xAAAAAAAA,
+	vfe40_ctrl->share_ctrl->vfebase + VFE_0_BUS_BDG_QOS_CFG_1);
+	msm_camera_io_w(0xAAAAAAAA,
+	vfe40_ctrl->share_ctrl->vfebase + VFE_0_BUS_BDG_QOS_CFG_2);
+	msm_camera_io_w(0xAAAAAAAA,
+	vfe40_ctrl->share_ctrl->vfebase + VFE_0_BUS_BDG_QOS_CFG_3);
+	msm_camera_io_w(0xAAAAAAAA,
+	vfe40_ctrl->share_ctrl->vfebase + VFE_0_BUS_BDG_QOS_CFG_4);
+	msm_camera_io_w(0xAAAAAAAA,
+	vfe40_ctrl->share_ctrl->vfebase + VFE_0_BUS_BDG_QOS_CFG_5);
+	msm_camera_io_w(0xAAAAAAAA,
+	vfe40_ctrl->share_ctrl->vfebase + VFE_0_BUS_BDG_QOS_CFG_6);
+	msm_camera_io_w(0x0002AAAA,
+	vfe40_ctrl->share_ctrl->vfebase + VFE_0_BUS_BDG_QOS_CFG_7);
+}
+
+static int vfe40_operation_config(uint32_t *cmd,
+			struct vfe40_ctrl_type *vfe40_ctrl)
+{
+	uint32_t *p = cmd;
+
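+	/*
+	 * The command payload is parsed in order: operation mode, stats
+	 * composite mask, HFR mode, then the remaining register values.
+	 */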
+	vfe40_ctrl->share_ctrl->operation_mode = *p;
+	vfe40_ctrl->share_ctrl->stats_comp = *(++p);
+	vfe40_ctrl->hfr_mode = *(++p);
+
+	msm_camera_io_w(*(++p),
+		vfe40_ctrl->share_ctrl->vfebase + VFE_CFG);
+	msm_camera_io_w(*(++p),
+		vfe40_ctrl->share_ctrl->vfebase + VFE_MODULE_CFG);
+	msm_camera_io_w(*(++p),
+		vfe40_ctrl->share_ctrl->vfebase + VFE_RDI0_CFG);
+	if (msm_camera_io_r(vfe40_ctrl->share_ctrl->vfebase +
+		V40_GET_HW_VERSION_OFF) ==
+		VFE40_HW_NUMBER) {
+		msm_camera_io_w(*(++p),
+			vfe40_ctrl->share_ctrl->vfebase + VFE_RDI1_CFG);
+		msm_camera_io_w(*(++p),
+			vfe40_ctrl->share_ctrl->vfebase + VFE_RDI2_CFG);
+	} else {
+		++p;
+		++p;
+	}
+	msm_camera_io_w(*(++p),
+		vfe40_ctrl->share_ctrl->vfebase + VFE_REALIGN_BUF);
+	msm_camera_io_w(*(++p),
+		vfe40_ctrl->share_ctrl->vfebase + VFE_CHROMA_UP);
+	msm_camera_io_w(*(++p),
+		vfe40_ctrl->share_ctrl->vfebase + VFE_STATS_CFG);
+	return 0;
+}
+
+static unsigned long vfe40_stats_dqbuf(struct vfe40_ctrl_type *vfe40_ctrl,
+	enum msm_stats_enum_type stats_type)
+{
+	struct msm_stats_meta_buf *buf = NULL;
+	int rc = 0;
+	rc = vfe40_ctrl->stats_ops.dqbuf(
+			vfe40_ctrl->stats_ops.stats_ctrl, stats_type, &buf);
+	if (rc < 0) {
+		pr_err("%s: dq stats buf (type = %d) err = %d",
+			__func__, stats_type, rc);
+		return 0L;
+	}
+	return buf->paddr;
+}
+
+static unsigned long vfe40_stats_flush_enqueue(
+	struct vfe40_ctrl_type *vfe40_ctrl,
+	enum msm_stats_enum_type stats_type)
+{
+	struct msm_stats_bufq *bufq = NULL;
+	struct msm_stats_meta_buf *stats_buf = NULL;
+	int rc = 0;
+	int i;
+
+	/*
+	 * Passing NULL for ion client as the buffers are already
+	 * mapped at this stage, client is not required, flush all
+	 * the buffers, and buffers move to PREPARE state
+	 */
+
+	rc = vfe40_ctrl->stats_ops.bufq_flush(
+			vfe40_ctrl->stats_ops.stats_ctrl, stats_type, NULL);
+	if (rc < 0) {
+		pr_err("%s: dq stats buf (type = %d) err = %d",
+			__func__, stats_type, rc);
+		return 0L;
+	}
+	/* Queue all the buffers back to QUEUED state */
+	bufq = vfe40_ctrl->stats_ctrl.bufq[stats_type];
+	for (i = 0; i < bufq->num_bufs; i++) {
+		stats_buf = &bufq->bufs[i];
+		rc = vfe40_ctrl->stats_ops.enqueue_buf(
+				vfe40_ctrl->stats_ops.stats_ctrl,
+				&(stats_buf->info), NULL);
+		if (rc < 0) {
+			pr_err("%s: dq stats buf (type = %d) err = %d",
+				 __func__, stats_type, rc);
+			return rc;
+		}
+	}
+	return 0L;
+}
+
+static int vfe_stats_awb_buf_init(
+	struct vfe40_ctrl_type *vfe40_ctrl,
+	struct vfe_cmd_stats_buf *in)
+{
+	uint32_t addr;
+	unsigned long flags;
+
+	spin_lock_irqsave(&vfe40_ctrl->stats_bufq_lock, flags);
+	addr = (uint32_t)vfe40_stats_dqbuf(vfe40_ctrl, MSM_STATS_TYPE_AWB);
+	spin_unlock_irqrestore(&vfe40_ctrl->stats_bufq_lock, flags);
+	if (!addr) {
+		pr_err("%s: dq awb ping buf from free buf queue", __func__);
+		return -ENOMEM;
+	}
+	msm_camera_io_w(addr,
+		vfe40_ctrl->share_ctrl->vfebase +
+		VFE_BUS_STATS_AWB_WR_PING_ADDR);
+	spin_lock_irqsave(&vfe40_ctrl->stats_bufq_lock, flags);
+	addr = (uint32_t)vfe40_stats_dqbuf(vfe40_ctrl, MSM_STATS_TYPE_AWB);
+	spin_unlock_irqrestore(&vfe40_ctrl->stats_bufq_lock, flags);
+	if (!addr) {
+		pr_err("%s: dq awb ping buf from free buf queue",
+			__func__);
+		return -ENOMEM;
+	}
+	msm_camera_io_w(addr,
+		vfe40_ctrl->share_ctrl->vfebase +
+		VFE_BUS_STATS_AWB_WR_PONG_ADDR);
+	return 0;
+}
+
+static int vfe_stats_aec_buf_init(
+	struct vfe40_ctrl_type *vfe40_ctrl, struct vfe_cmd_stats_buf *in)
+{
+	uint32_t addr;
+	unsigned long flags;
+
+	spin_lock_irqsave(&vfe40_ctrl->stats_bufq_lock, flags);
+	addr = (uint32_t)vfe40_stats_dqbuf(vfe40_ctrl, MSM_STATS_TYPE_AEC);
+	spin_unlock_irqrestore(&vfe40_ctrl->stats_bufq_lock, flags);
+	if (!addr) {
+		pr_err("%s: dq aec ping buf from free buf queue",
+			__func__);
+		return -ENOMEM;
+	}
+	msm_camera_io_w(addr,
+		vfe40_ctrl->share_ctrl->vfebase +
+		VFE_BUS_STATS_AEC_WR_PING_ADDR);
+	spin_lock_irqsave(&vfe40_ctrl->stats_bufq_lock, flags);
+	addr = (uint32_t)vfe40_stats_dqbuf(vfe40_ctrl, MSM_STATS_TYPE_AEC);
+	spin_unlock_irqrestore(&vfe40_ctrl->stats_bufq_lock, flags);
+	if (!addr) {
+		pr_err("%s: dq aec pong buf from free buf queue",
+			__func__);
+		return -ENOMEM;
+	}
+	msm_camera_io_w(addr,
+		vfe40_ctrl->share_ctrl->vfebase +
+		VFE_BUS_STATS_AEC_WR_PONG_ADDR);
+	return 0;
+}
+
+static int vfe_stats_af_buf_init(
+	struct vfe40_ctrl_type *vfe40_ctrl, struct vfe_cmd_stats_buf *in)
+{
+	uint32_t addr;
+	unsigned long flags;
+	int rc = 0;
+
+	spin_lock_irqsave(&vfe40_ctrl->stats_bufq_lock, flags);
+	rc = vfe40_stats_flush_enqueue(vfe40_ctrl, MSM_STATS_TYPE_AF);
+	if (rc < 0) {
+		pr_err("%s: dq stats buf err = %d",
+			   __func__, rc);
+		spin_unlock_irqrestore(&vfe40_ctrl->stats_bufq_lock, flags);
+		return -EINVAL;
+	}
+	addr = (uint32_t)vfe40_stats_dqbuf(vfe40_ctrl, MSM_STATS_TYPE_AF);
+	spin_unlock_irqrestore(&vfe40_ctrl->stats_bufq_lock, flags);
+	if (!addr) {
+		pr_err("%s: dq af ping buf from free buf queue", __func__);
+		return -ENOMEM;
+	}
+	msm_camera_io_w(addr,
+		vfe40_ctrl->share_ctrl->vfebase +
+		VFE_BUS_STATS_AF_WR_PING_ADDR);
+	spin_lock_irqsave(&vfe40_ctrl->stats_bufq_lock, flags);
+	addr = (uint32_t)vfe40_stats_dqbuf(vfe40_ctrl, MSM_STATS_TYPE_AF);
+	spin_unlock_irqrestore(&vfe40_ctrl->stats_bufq_lock, flags);
+	if (!addr) {
+		pr_err("%s: dq af pong buf from free buf queue", __func__);
+		return -ENOMEM;
+	}
+	msm_camera_io_w(addr,
+		vfe40_ctrl->share_ctrl->vfebase +
+		VFE_BUS_STATS_AF_WR_PONG_ADDR);
+
+	return 0;
+}
+
+static int vfe_stats_ihist_buf_init(
+	struct vfe40_ctrl_type *vfe40_ctrl, struct vfe_cmd_stats_buf *in)
+{
+	uint32_t addr;
+	unsigned long flags;
+
+	spin_lock_irqsave(&vfe40_ctrl->stats_bufq_lock, flags);
+	addr = (uint32_t)vfe40_stats_dqbuf(vfe40_ctrl, MSM_STATS_TYPE_IHIST);
+	spin_unlock_irqrestore(&vfe40_ctrl->stats_bufq_lock, flags);
+	if (!addr) {
+		pr_err("%s: dq ihist ping buf from free buf queue",
+			__func__);
+		return -ENOMEM;
+	}
+	msm_camera_io_w(addr,
+		vfe40_ctrl->share_ctrl->vfebase +
+		VFE_BUS_STATS_HIST_WR_PING_ADDR);
+	spin_lock_irqsave(&vfe40_ctrl->stats_bufq_lock, flags);
+	addr = (uint32_t)vfe40_stats_dqbuf(vfe40_ctrl, MSM_STATS_TYPE_IHIST);
+	spin_unlock_irqrestore(&vfe40_ctrl->stats_bufq_lock, flags);
+	if (!addr) {
+		pr_err("%s: dq ihist pong buf from free buf queue",
+			__func__);
+		return -ENOMEM;
+	}
+	msm_camera_io_w(addr,
+		vfe40_ctrl->share_ctrl->vfebase +
+		VFE_BUS_STATS_HIST_WR_PONG_ADDR);
+
+	return 0;
+}
+
+static int vfe_stats_rs_buf_init(
+	struct vfe40_ctrl_type *vfe40_ctrl, struct vfe_cmd_stats_buf *in)
+{
+	uint32_t addr;
+	unsigned long flags;
+
+	spin_lock_irqsave(&vfe40_ctrl->stats_bufq_lock, flags);
+	addr = (uint32_t)vfe40_stats_dqbuf(vfe40_ctrl, MSM_STATS_TYPE_RS);
+	spin_unlock_irqrestore(&vfe40_ctrl->stats_bufq_lock, flags);
+	if (!addr) {
+		pr_err("%s: dq rs ping buf from free buf queue", __func__);
+		return -ENOMEM;
+	}
+	msm_camera_io_w(addr,
+		vfe40_ctrl->share_ctrl->vfebase +
+		VFE_BUS_STATS_RS_WR_PING_ADDR);
+	spin_lock_irqsave(&vfe40_ctrl->stats_bufq_lock, flags);
+	addr = (uint32_t)vfe40_stats_dqbuf(vfe40_ctrl, MSM_STATS_TYPE_RS);
+	spin_unlock_irqrestore(&vfe40_ctrl->stats_bufq_lock, flags);
+	if (!addr) {
+		pr_err("%s: dq rs pong buf from free buf queue", __func__);
+		return -ENOMEM;
+	}
+	msm_camera_io_w(addr,
+		vfe40_ctrl->share_ctrl->vfebase +
+		VFE_BUS_STATS_RS_WR_PONG_ADDR);
+	return 0;
+}
+
+static int vfe_stats_cs_buf_init(
+	struct vfe40_ctrl_type *vfe40_ctrl, struct vfe_cmd_stats_buf *in)
+{
+	uint32_t addr;
+	unsigned long flags;
+	spin_lock_irqsave(&vfe40_ctrl->stats_bufq_lock, flags);
+	addr = (uint32_t)vfe40_stats_dqbuf(vfe40_ctrl, MSM_STATS_TYPE_CS);
+	spin_unlock_irqrestore(&vfe40_ctrl->stats_bufq_lock, flags);
+	if (!addr) {
+		pr_err("%s: dq cs ping buf from free buf queue", __func__);
+		return -ENOMEM;
+	}
+	msm_camera_io_w(addr,
+		vfe40_ctrl->share_ctrl->vfebase +
+		VFE_BUS_STATS_CS_WR_PING_ADDR);
+	spin_lock_irqsave(&vfe40_ctrl->stats_bufq_lock, flags);
+	addr = (uint32_t)vfe40_stats_dqbuf(vfe40_ctrl, MSM_STATS_TYPE_CS);
+	spin_unlock_irqrestore(&vfe40_ctrl->stats_bufq_lock, flags);
+	if (!addr) {
+		pr_err("%s: dq cs pong buf from free buf queue", __func__);
+		return -ENOMEM;
+	}
+	msm_camera_io_w(addr,
+		vfe40_ctrl->share_ctrl->vfebase +
+		VFE_BUS_STATS_CS_WR_PONG_ADDR);
+	return 0;
+}
+
+static void vfe40_start_common(struct vfe40_ctrl_type *vfe40_ctrl)
+{
+	uint32_t irq_mask = 0x1E000011;
+	vfe40_ctrl->start_ack_pending = TRUE;
+	CDBG("VFE opertaion mode = 0x%x, output mode = 0x%x\n",
+		vfe40_ctrl->share_ctrl->operation_mode,
+		vfe40_ctrl->share_ctrl->outpath.output_mode);
+
+	msm_camera_io_w(irq_mask,
+		vfe40_ctrl->share_ctrl->vfebase + VFE_IRQ_MASK_0);
+	msm_camera_io_w(VFE_IMASK_WHILE_STOPPING_1,
+		vfe40_ctrl->share_ctrl->vfebase + VFE_IRQ_MASK_1);
+
+	/* Ensure the write order while writing
+	to the command register using the barrier */
+	msm_camera_io_w_mb(1,
+		vfe40_ctrl->share_ctrl->vfebase + VFE_REG_UPDATE_CMD);
+	msm_camera_io_w_mb(1,
+		vfe40_ctrl->share_ctrl->vfebase + VFE_CAMIF_COMMAND);
+
+	msm_camera_io_dump(vfe40_ctrl->share_ctrl->vfebase,
+		vfe40_ctrl->share_ctrl->register_total*4);
+
+	atomic_set(&vfe40_ctrl->share_ctrl->vstate, 1);
+}
+
+static int vfe40_start_recording(
+	struct msm_cam_media_controller *pmctl,
+	struct vfe40_ctrl_type *vfe40_ctrl)
+{
+	msm_camio_bus_scale_cfg(
+		pmctl->sdata->pdata->cam_bus_scale_table, S_VIDEO);
+	vfe40_ctrl->recording_state = VFE_STATE_START_REQUESTED;
+	msm_camera_io_w_mb(1,
+		vfe40_ctrl->share_ctrl->vfebase + VFE_REG_UPDATE_CMD);
+	return 0;
+}
+
+static int vfe40_stop_recording(
+	struct msm_cam_media_controller *pmctl,
+	struct vfe40_ctrl_type *vfe40_ctrl)
+{
+	vfe40_ctrl->recording_state = VFE_STATE_STOP_REQUESTED;
+	msm_camera_io_w_mb(1,
+		vfe40_ctrl->share_ctrl->vfebase + VFE_REG_UPDATE_CMD);
+	msm_camio_bus_scale_cfg(
+		pmctl->sdata->pdata->cam_bus_scale_table, S_PREVIEW);
+	return 0;
+}
+
+static void vfe40_start_liveshot(
+	struct msm_cam_media_controller *pmctl,
+	struct vfe40_ctrl_type *vfe40_ctrl)
+{
+	/* Hardcode 1 live snapshot for now. */
+	vfe40_ctrl->share_ctrl->outpath.out0.capture_cnt = 1;
+	vfe40_ctrl->share_ctrl->vfe_capture_count =
+		vfe40_ctrl->share_ctrl->outpath.out0.capture_cnt;
+
+	vfe40_ctrl->share_ctrl->liveshot_state = VFE_STATE_START_REQUESTED;
+	msm_camera_io_w_mb(1, vfe40_ctrl->
+		share_ctrl->vfebase + VFE_REG_UPDATE_CMD);
+}
+
+static int vfe40_zsl(
+	struct msm_cam_media_controller *pmctl,
+	struct vfe40_ctrl_type *vfe40_ctrl)
+{
+	uint32_t irq_comp_mask = 0;
+	/* capture command is valid for both idle and active state. */
+	irq_comp_mask	=
+		msm_camera_io_r(vfe40_ctrl->
+		share_ctrl->vfebase + VFE_IRQ_COMP_MASK);
+
+	CDBG("%s:op mode %d O/P Mode %d\n", __func__,
+		vfe40_ctrl->share_ctrl->operation_mode,
+		vfe40_ctrl->share_ctrl->outpath.output_mode);
+
+	if (vfe40_ctrl->share_ctrl->outpath.output_mode &
+		VFE40_OUTPUT_MODE_PRIMARY) {
+		irq_comp_mask |= (
+			(0x1 << (vfe40_ctrl->share_ctrl->outpath.out0.ch0)) |
+			(0x1 << (vfe40_ctrl->share_ctrl->outpath.out0.ch1)));
+	} else if (vfe40_ctrl->share_ctrl->outpath.output_mode &
+			VFE40_OUTPUT_MODE_PRIMARY_ALL_CHNLS) {
+		irq_comp_mask |= (
+			(0x1 << (vfe40_ctrl->share_ctrl->outpath.out0.ch0)) |
+			(0x1 << (vfe40_ctrl->share_ctrl->outpath.out0.ch1)) |
+			(0x1 << (vfe40_ctrl->share_ctrl->outpath.out0.ch2)));
+	}
+
+	if (vfe40_ctrl->share_ctrl->outpath.output_mode &
+		VFE40_OUTPUT_MODE_SECONDARY) {
+		irq_comp_mask |= ((0x1 << (vfe40_ctrl->
+				share_ctrl->outpath.out1.ch0 + 8)) |
+			(0x1 << (vfe40_ctrl->
+				share_ctrl->outpath.out1.ch1 + 8)));
+	} else if (vfe40_ctrl->share_ctrl->outpath.output_mode &
+			   VFE40_OUTPUT_MODE_SECONDARY_ALL_CHNLS) {
+		irq_comp_mask |= (
+			(0x1 << (vfe40_ctrl->
+				share_ctrl->outpath.out1.ch0 + 8)) |
+			(0x1 << (vfe40_ctrl->
+				share_ctrl->outpath.out1.ch1 + 8)) |
+			(0x1 << (vfe40_ctrl->
+				share_ctrl->outpath.out1.ch2 + 8)));
+	}
+
+	if (vfe40_ctrl->share_ctrl->outpath.output_mode &
+			VFE40_OUTPUT_MODE_PRIMARY) {
+		msm_camera_io_w(1, vfe40_ctrl->share_ctrl->vfebase +
+			vfe40_AXI_WM_CFG[vfe40_ctrl->
+			share_ctrl->outpath.out0.ch0]);
+		msm_camera_io_w(1, vfe40_ctrl->share_ctrl->vfebase +
+			vfe40_AXI_WM_CFG[vfe40_ctrl->
+			share_ctrl->outpath.out0.ch1]);
+	} else if (vfe40_ctrl->share_ctrl->outpath.output_mode &
+				VFE40_OUTPUT_MODE_PRIMARY_ALL_CHNLS) {
+		msm_camera_io_w(1, vfe40_ctrl->share_ctrl->vfebase +
+			vfe40_AXI_WM_CFG[vfe40_ctrl->
+			share_ctrl->outpath.out0.ch0]);
+		msm_camera_io_w(1, vfe40_ctrl->share_ctrl->vfebase +
+			vfe40_AXI_WM_CFG[vfe40_ctrl->
+			share_ctrl->outpath.out0.ch1]);
+		msm_camera_io_w(1, vfe40_ctrl->share_ctrl->vfebase +
+			vfe40_AXI_WM_CFG[vfe40_ctrl->
+			share_ctrl->outpath.out0.ch2]);
+	}
+
+	if (vfe40_ctrl->share_ctrl->outpath.output_mode &
+			VFE40_OUTPUT_MODE_SECONDARY) {
+		msm_camera_io_w(1, vfe40_ctrl->share_ctrl->vfebase +
+			vfe40_AXI_WM_CFG[vfe40_ctrl->
+			share_ctrl->outpath.out1.ch0]);
+		msm_camera_io_w(1, vfe40_ctrl->share_ctrl->vfebase +
+			vfe40_AXI_WM_CFG[vfe40_ctrl->
+			share_ctrl->outpath.out1.ch1]);
+	} else if (vfe40_ctrl->share_ctrl->outpath.output_mode &
+				VFE40_OUTPUT_MODE_SECONDARY_ALL_CHNLS) {
+		msm_camera_io_w(1, vfe40_ctrl->share_ctrl->vfebase +
+			vfe40_AXI_WM_CFG[vfe40_ctrl->
+			share_ctrl->outpath.out1.ch0]);
+		msm_camera_io_w(1, vfe40_ctrl->share_ctrl->vfebase +
+			vfe40_AXI_WM_CFG[vfe40_ctrl->
+			share_ctrl->outpath.out1.ch1]);
+		msm_camera_io_w(1, vfe40_ctrl->share_ctrl->vfebase +
+			vfe40_AXI_WM_CFG[vfe40_ctrl->
+			share_ctrl->outpath.out1.ch2]);
+	}
+
+	msm_camera_io_w(irq_comp_mask,
+		vfe40_ctrl->share_ctrl->vfebase + VFE_IRQ_COMP_MASK);
+	vfe40_start_common(vfe40_ctrl);
+	msm_camio_bus_scale_cfg(
+		pmctl->sdata->pdata->cam_bus_scale_table, S_ZSL);
+
+	msm_camera_io_w(1, vfe40_ctrl->share_ctrl->vfebase + 0x18C);
+	msm_camera_io_w(1, vfe40_ctrl->share_ctrl->vfebase + 0x188);
+	return 0;
+}
+static int vfe40_capture_raw(
+	struct msm_cam_media_controller *pmctl,
+	struct vfe40_ctrl_type *vfe40_ctrl,
+	uint32_t num_frames_capture)
+{
+	uint32_t irq_comp_mask = 0;
+
+	vfe40_ctrl->share_ctrl->outpath.out0.capture_cnt = num_frames_capture;
+	vfe40_ctrl->share_ctrl->vfe_capture_count = num_frames_capture;
+
+	irq_comp_mask	=
+		msm_camera_io_r(
+			vfe40_ctrl->share_ctrl->vfebase + VFE_IRQ_COMP_MASK);
+
+	if (vfe40_ctrl->share_ctrl->outpath.output_mode &
+		VFE40_OUTPUT_MODE_PRIMARY) {
+		irq_comp_mask |=
+			(0x1 << (vfe40_ctrl->share_ctrl->outpath.out0.ch0));
+		msm_camera_io_w(1, vfe40_ctrl->share_ctrl->vfebase +
+			vfe40_AXI_WM_CFG[vfe40_ctrl->
+			share_ctrl->outpath.out0.ch0]);
+	}
+
+	msm_camera_io_w(irq_comp_mask,
+		vfe40_ctrl->share_ctrl->vfebase + VFE_IRQ_COMP_MASK);
+	msm_camio_bus_scale_cfg(
+		pmctl->sdata->pdata->cam_bus_scale_table, S_CAPTURE);
+	vfe40_start_common(vfe40_ctrl);
+	return 0;
+}
+
+static int vfe40_capture(
+	struct msm_cam_media_controller *pmctl,
+	uint32_t num_frames_capture,
+	struct vfe40_ctrl_type *vfe40_ctrl)
+{
+	uint32_t irq_comp_mask = 0;
+
+	/* capture command is valid for both idle and active state. */
+	vfe40_ctrl->share_ctrl->outpath.out1.capture_cnt = num_frames_capture;
+	if (vfe40_ctrl->share_ctrl->operation_mode ==
+			VFE_OUTPUTS_MAIN_AND_THUMB ||
+		vfe40_ctrl->share_ctrl->operation_mode ==
+			VFE_OUTPUTS_THUMB_AND_MAIN ||
+		vfe40_ctrl->share_ctrl->operation_mode ==
+			VFE_OUTPUTS_JPEG_AND_THUMB ||
+		vfe40_ctrl->share_ctrl->operation_mode ==
+			VFE_OUTPUTS_THUMB_AND_JPEG) {
+		vfe40_ctrl->share_ctrl->outpath.out0.capture_cnt =
+			num_frames_capture;
+	}
+
+	vfe40_ctrl->share_ctrl->vfe_capture_count = num_frames_capture;
+	irq_comp_mask = msm_camera_io_r(
+			vfe40_ctrl->share_ctrl->vfebase + VFE_IRQ_COMP_MASK);
+
+	if (vfe40_ctrl->share_ctrl->operation_mode ==
+			VFE_OUTPUTS_MAIN_AND_THUMB ||
+		vfe40_ctrl->share_ctrl->operation_mode ==
+			VFE_OUTPUTS_JPEG_AND_THUMB ||
+		vfe40_ctrl->share_ctrl->operation_mode ==
+			VFE_OUTPUTS_THUMB_AND_MAIN) {
+		if (vfe40_ctrl->share_ctrl->outpath.output_mode &
+			VFE40_OUTPUT_MODE_PRIMARY) {
+			irq_comp_mask |= (0x1 << vfe40_ctrl->
+				share_ctrl->outpath.out0.ch0 |
+				0x1 << vfe40_ctrl->
+				share_ctrl->outpath.out0.ch1);
+		}
+		if (vfe40_ctrl->share_ctrl->outpath.output_mode &
+			VFE40_OUTPUT_MODE_SECONDARY) {
+			irq_comp_mask |=
+				(0x1 << (vfe40_ctrl->
+					share_ctrl->outpath.out1.ch0 + 8) |
+				0x1 << (vfe40_ctrl->
+					share_ctrl->outpath.out1.ch1 + 8));
+		}
+		if (vfe40_ctrl->share_ctrl->outpath.output_mode &
+			VFE40_OUTPUT_MODE_PRIMARY) {
+			msm_camera_io_w(1, vfe40_ctrl->share_ctrl->vfebase +
+				vfe40_AXI_WM_CFG[vfe40_ctrl->
+				share_ctrl->outpath.out0.ch0]);
+			msm_camera_io_w(1, vfe40_ctrl->share_ctrl->vfebase +
+				vfe40_AXI_WM_CFG[vfe40_ctrl->
+				share_ctrl->outpath.out0.ch1]);
+		}
+		if (vfe40_ctrl->share_ctrl->outpath.output_mode &
+			VFE40_OUTPUT_MODE_SECONDARY) {
+			msm_camera_io_w(1, vfe40_ctrl->share_ctrl->vfebase +
+				vfe40_AXI_WM_CFG[vfe40_ctrl->
+				share_ctrl->outpath.out1.ch0]);
+			msm_camera_io_w(1, vfe40_ctrl->share_ctrl->vfebase +
+				vfe40_AXI_WM_CFG[vfe40_ctrl->
+				share_ctrl->outpath.out1.ch1]);
+		}
+	}
+
+	vfe40_ctrl->share_ctrl->vfe_capture_count = num_frames_capture;
+
+	msm_camera_io_w(irq_comp_mask,
+		vfe40_ctrl->share_ctrl->vfebase + VFE_IRQ_COMP_MASK);
+	msm_camera_io_r(vfe40_ctrl->share_ctrl->vfebase + VFE_IRQ_COMP_MASK);
+	msm_camio_bus_scale_cfg(
+		pmctl->sdata->pdata->cam_bus_scale_table, S_CAPTURE);
+
+	vfe40_start_common(vfe40_ctrl);
+	/* for debug */
+	msm_camera_io_w(1, vfe40_ctrl->share_ctrl->vfebase + 0x18C);
+	msm_camera_io_w(1, vfe40_ctrl->share_ctrl->vfebase + 0x188);
+	return 0;
+}
+
+static int vfe40_start(
+	struct msm_cam_media_controller *pmctl,
+	struct vfe40_ctrl_type *vfe40_ctrl)
+{
+	uint32_t irq_comp_mask = 0;
+	irq_comp_mask	=
+		msm_camera_io_r(vfe40_ctrl->share_ctrl->vfebase +
+			VFE_IRQ_COMP_MASK);
+
+	if (vfe40_ctrl->share_ctrl->outpath.output_mode &
+			VFE40_OUTPUT_MODE_PRIMARY) {
+		irq_comp_mask |= (
+			0x1 << vfe40_ctrl->share_ctrl->outpath.out0.ch0 |
+			0x1 << vfe40_ctrl->share_ctrl->outpath.out0.ch1);
+	} else if (vfe40_ctrl->share_ctrl->outpath.output_mode &
+			   VFE40_OUTPUT_MODE_PRIMARY_ALL_CHNLS) {
+		irq_comp_mask |= (
+			0x1 << vfe40_ctrl->share_ctrl->outpath.out0.ch0 |
+			0x1 << vfe40_ctrl->share_ctrl->outpath.out0.ch1 |
+			0x1 << vfe40_ctrl->share_ctrl->outpath.out0.ch2);
+	}
+	if (vfe40_ctrl->share_ctrl->outpath.output_mode &
+			VFE40_OUTPUT_MODE_SECONDARY) {
+		irq_comp_mask |= (
+			0x1 << (vfe40_ctrl->share_ctrl->outpath.out1.ch0 + 8) |
+			0x1 << (vfe40_ctrl->share_ctrl->outpath.out1.ch1 + 8));
+	} else if (vfe40_ctrl->share_ctrl->outpath.output_mode &
+			VFE40_OUTPUT_MODE_SECONDARY_ALL_CHNLS) {
+		irq_comp_mask |= (
+			0x1 << (vfe40_ctrl->share_ctrl->outpath.out1.ch0 + 8) |
+			0x1 << (vfe40_ctrl->share_ctrl->outpath.out1.ch1 + 8) |
+			0x1 << (vfe40_ctrl->share_ctrl->outpath.out1.ch2 + 8));
+	}
+	msm_camera_io_w(irq_comp_mask,
+		vfe40_ctrl->share_ctrl->vfebase + VFE_IRQ_COMP_MASK);
+
+	/*
+	msm_camio_bus_scale_cfg(
+		pmctl->sdata->pdata->cam_bus_scale_table, S_PREVIEW);*/
+	vfe40_start_common(vfe40_ctrl);
+	return 0;
+}
+
+static void vfe40_update(struct vfe40_ctrl_type *vfe40_ctrl)
+{
+	unsigned long flags;
+	uint32_t value = 0;
+	if (vfe40_ctrl->update_linear) {
+		if (!msm_camera_io_r(
+			vfe40_ctrl->share_ctrl->vfebase +
+			V40_LINEARIZATION_OFF1))
+			msm_camera_io_w(1,
+				vfe40_ctrl->share_ctrl->vfebase +
+				V40_LINEARIZATION_OFF1);
+		else
+			msm_camera_io_w(0,
+				vfe40_ctrl->share_ctrl->vfebase +
+				V40_LINEARIZATION_OFF1);
+		vfe40_ctrl->update_linear = false;
+	}
+
+	if (vfe40_ctrl->update_la) {
+		if (!msm_camera_io_r(
+			vfe40_ctrl->share_ctrl->vfebase + V40_LA_OFF))
+			msm_camera_io_w(1,
+				vfe40_ctrl->share_ctrl->vfebase + V40_LA_OFF);
+		else
+			msm_camera_io_w(0,
+				vfe40_ctrl->share_ctrl->vfebase + V40_LA_OFF);
+		vfe40_ctrl->update_la = false;
+	}
+
+	if (vfe40_ctrl->update_gamma) {
+		value = msm_camera_io_r(
+			vfe40_ctrl->share_ctrl->vfebase + V40_RGB_G_OFF);
+		value ^= V40_GAMMA_LUT_BANK_SEL_MASK;
+		msm_camera_io_w(value,
+			vfe40_ctrl->share_ctrl->vfebase + V40_RGB_G_OFF);
+		vfe40_ctrl->update_gamma = false;
+	}
+
+	spin_lock_irqsave(&vfe40_ctrl->update_ack_lock, flags);
+	vfe40_ctrl->update_ack_pending = TRUE;
+	spin_unlock_irqrestore(&vfe40_ctrl->update_ack_lock, flags);
+	/* Ensure the write order while writing
+	to the command register using the barrier */
+	msm_camera_io_w_mb(1,
+		vfe40_ctrl->share_ctrl->vfebase + VFE_REG_UPDATE_CMD);
+	return;
+}
+
+static void vfe40_sync_timer_stop(struct vfe40_ctrl_type *vfe40_ctrl)
+{
+	uint32_t value = 0;
+	vfe40_ctrl->sync_timer_state = 0;
+	if (vfe40_ctrl->sync_timer_number == 0)
+		value = 0x10000;
+	else if (vfe40_ctrl->sync_timer_number == 1)
+		value = 0x20000;
+	else if (vfe40_ctrl->sync_timer_number == 2)
+		value = 0x40000;
+
+	/* Timer Stop */
+	msm_camera_io_w(value,
+		vfe40_ctrl->share_ctrl->vfebase + V40_SYNC_TIMER_OFF);
+}
+
+static void vfe40_sync_timer_start(
+	const uint32_t *tbl,
+	struct vfe40_ctrl_type *vfe40_ctrl)
+{
+	/* Timer start bit, shifted below by the sync timer number. */
+	uint32_t value = 1;
+	uint32_t val;
+
+	vfe40_ctrl->sync_timer_state = *tbl++;
+	vfe40_ctrl->sync_timer_repeat_count = *tbl++;
+	vfe40_ctrl->sync_timer_number = *tbl++;
+	CDBG("%s timer_state %d, repeat_cnt %d timer number %d\n",
+		 __func__, vfe40_ctrl->sync_timer_state,
+		 vfe40_ctrl->sync_timer_repeat_count,
+		 vfe40_ctrl->sync_timer_number);
+
+	if (vfe40_ctrl->sync_timer_state) { /* Start Timer */
+		value = value << vfe40_ctrl->sync_timer_number;
+	} else { /* Stop Timer */
+		CDBG("Failed to Start timer\n");
+		return;
+	}
+
+	/* Timer Start */
+	msm_camera_io_w(value,
+		vfe40_ctrl->share_ctrl->vfebase + V40_SYNC_TIMER_OFF);
+	/* Sync Timer Line Start */
+	value = *tbl++;
+	msm_camera_io_w(value,
+		vfe40_ctrl->share_ctrl->vfebase + V40_SYNC_TIMER_OFF +
+		4 + ((vfe40_ctrl->sync_timer_number) * 12));
+	/* Sync Timer Pixel Start */
+	value = *tbl++;
+	msm_camera_io_w(value,
+			vfe40_ctrl->share_ctrl->vfebase + V40_SYNC_TIMER_OFF +
+			 8 + ((vfe40_ctrl->sync_timer_number) * 12));
+	/* Sync Timer Pixel Duration */
+	value = *tbl++;
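+	/* Convert the duration into pixel clock cycles via the VFE clock rate */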
+	val = vfe40_ctrl->share_ctrl->vfe_clk_rate / 10000;
+	val = 10000000 / val;
+	val = value * 10000 / val;
+	CDBG("%s: Pixel Clk Cycles!!! %d\n", __func__, val);
+	msm_camera_io_w(val,
+		vfe40_ctrl->share_ctrl->vfebase + V40_SYNC_TIMER_OFF +
+		12 + ((vfe40_ctrl->sync_timer_number) * 12));
+	/* Timer0 Active High/LOW */
+	value = *tbl++;
+	msm_camera_io_w(value,
+		vfe40_ctrl->share_ctrl->vfebase + V40_SYNC_TIMER_POLARITY_OFF);
+	/* Selects sync timer 0 output to drive onto timer1 port */
+	value = 0;
+	msm_camera_io_w(value,
+		vfe40_ctrl->share_ctrl->vfebase + V40_TIMER_SELECT_OFF);
+}
+
+static void vfe40_program_dmi_cfg(
+	enum VFE40_DMI_RAM_SEL bankSel,
+	struct vfe40_ctrl_type *vfe40_ctrl)
+{
+	/* set bit 8 for auto increment. */
+	uint32_t value = VFE_DMI_CFG_DEFAULT;
+	value += (uint32_t)bankSel;
+	CDBG("%s: banksel = %d\n", __func__, bankSel);
+
+	msm_camera_io_w(value, vfe40_ctrl->share_ctrl->vfebase + VFE_DMI_CFG);
+	/* by default, always starts with offset 0.*/
+	msm_camera_io_w(0, vfe40_ctrl->share_ctrl->vfebase + VFE_DMI_ADDR);
+}
+static void vfe40_write_gamma_cfg(
+	enum VFE40_DMI_RAM_SEL channel_sel,
+	const uint32_t *tbl,
+	struct vfe40_ctrl_type *vfe40_ctrl)
+{
+	int i;
+	uint32_t value, value1, value2;
+	vfe40_program_dmi_cfg(channel_sel, vfe40_ctrl);
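+	/*
+	 * Each 32-bit table entry packs two 16-bit gamma values, written to
+	 * the DMI data register back to back.
+	 */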
+	for (i = 0 ; i < (VFE40_GAMMA_NUM_ENTRIES/2) ; i++) {
+		value = *tbl++;
+		value1 = value & 0x0000FFFF;
+		value2 = (value & 0xFFFF0000)>>16;
+		msm_camera_io_w((value1),
+			vfe40_ctrl->share_ctrl->vfebase + VFE_DMI_DATA_LO);
+		msm_camera_io_w((value2),
+			vfe40_ctrl->share_ctrl->vfebase + VFE_DMI_DATA_LO);
+	}
+	vfe40_program_dmi_cfg(NO_MEM_SELECTED, vfe40_ctrl);
+}
+
+static void vfe40_read_gamma_cfg(
+	enum VFE40_DMI_RAM_SEL channel_sel,
+	uint32_t *tbl,
+	struct vfe40_ctrl_type *vfe40_ctrl)
+{
+	int i;
+	vfe40_program_dmi_cfg(channel_sel, vfe40_ctrl);
+	CDBG("%s: Gamma table channel: %d\n", __func__, channel_sel);
+	for (i = 0 ; i < VFE40_GAMMA_NUM_ENTRIES ; i++) {
+		*tbl = msm_camera_io_r(
+			vfe40_ctrl->share_ctrl->vfebase + VFE_DMI_DATA_LO);
+		CDBG("%s: %08x\n", __func__, *tbl);
+		tbl++;
+	}
+	vfe40_program_dmi_cfg(NO_MEM_SELECTED, vfe40_ctrl);
+}
+
+static void vfe40_write_la_cfg(
+	enum VFE40_DMI_RAM_SEL channel_sel,
+	const uint32_t *tbl,
+	struct vfe40_ctrl_type *vfe40_ctrl)
+{
+	uint32_t i;
+	uint32_t value, value1, value2;
+
+	vfe40_program_dmi_cfg(channel_sel, vfe40_ctrl);
+	for (i = 0 ; i < (VFE40_LA_TABLE_LENGTH/2) ; i++) {
+		value = *tbl++;
+		value1 = value & 0x0000FFFF;
+		value2 = (value & 0xFFFF0000)>>16;
+		msm_camera_io_w((value1),
+			vfe40_ctrl->share_ctrl->vfebase + VFE_DMI_DATA_LO);
+		msm_camera_io_w((value2),
+			vfe40_ctrl->share_ctrl->vfebase + VFE_DMI_DATA_LO);
+	}
+	vfe40_program_dmi_cfg(NO_MEM_SELECTED, vfe40_ctrl);
+}
+
+struct vfe40_output_ch *vfe40_get_ch(
+	int path, struct vfe_share_ctrl_t *share_ctrl)
+{
+	struct vfe40_output_ch *ch = NULL;
+
+	if (path == VFE_MSG_OUTPUT_PRIMARY)
+		ch = &share_ctrl->outpath.out0;
+	else if (path == VFE_MSG_OUTPUT_SECONDARY)
+		ch = &share_ctrl->outpath.out1;
+	else
+		pr_err("%s: Invalid path %d\n", __func__,
+			path);
+
+	BUG_ON(ch == NULL);
+	return ch;
+}
+
+static int vfe40_configure_pingpong_buffers(
+	int id, int path, struct vfe40_ctrl_type *vfe40_ctrl)
+{
+	struct vfe40_output_ch *outch = NULL;
+	int rc = 0;
+	uint32_t image_mode = 0;
+	if (path == VFE_MSG_OUTPUT_PRIMARY)
+		image_mode = vfe40_ctrl->share_ctrl->outpath.out0.image_mode;
+	else
+		image_mode = vfe40_ctrl->share_ctrl->outpath.out1.image_mode;
+
+	vfe40_subdev_notify(id, path, image_mode,
+		&vfe40_ctrl->subdev, vfe40_ctrl->share_ctrl);
+	outch = vfe40_get_ch(path, vfe40_ctrl->share_ctrl);
+	if (outch->ping.ch_paddr[0] && outch->pong.ch_paddr[0]) {
+		/* Configure Preview Ping Pong */
+		CDBG("%s Configure ping/pong address for %d",
+						__func__, path);
+		vfe40_put_ch_ping_addr(
+			vfe40_ctrl->share_ctrl->vfebase, outch->ch0,
+			outch->ping.ch_paddr[0]);
+		vfe40_put_ch_pong_addr(
+			vfe40_ctrl->share_ctrl->vfebase, outch->ch0,
+			outch->pong.ch_paddr[0]);
+
+		if (vfe40_ctrl->share_ctrl->operation_mode !=
+			VFE_OUTPUTS_RAW) {
+			vfe40_put_ch_ping_addr(
+				vfe40_ctrl->share_ctrl->vfebase, outch->ch1,
+				outch->ping.ch_paddr[1]);
+			vfe40_put_ch_pong_addr(
+				vfe40_ctrl->share_ctrl->vfebase, outch->ch1,
+				outch->pong.ch_paddr[1]);
+		}
+
+		if (outch->ping.num_planes > 2)
+			vfe40_put_ch_ping_addr(
+				vfe40_ctrl->share_ctrl->vfebase, outch->ch2,
+				outch->ping.ch_paddr[2]);
+		if (outch->pong.num_planes > 2)
+			vfe40_put_ch_pong_addr(
+				vfe40_ctrl->share_ctrl->vfebase, outch->ch2,
+				outch->pong.ch_paddr[2]);
+
+		/* avoid stale info */
+		memset(&outch->ping, 0, sizeof(struct msm_free_buf));
+		memset(&outch->pong, 0, sizeof(struct msm_free_buf));
+	} else {
+		pr_err("%s ping/pong addr is null!!", __func__);
+		rc = -EINVAL;
+	}
+	return rc;
+}
+
+static void vfe40_write_linear_cfg(
+	enum VFE40_DMI_RAM_SEL channel_sel,
+	const uint32_t *tbl, struct vfe40_ctrl_type *vfe40_ctrl)
+{
+	uint32_t i;
+
+	vfe40_program_dmi_cfg(channel_sel, vfe40_ctrl);
+	/* for loop for configuring LUT. */
+	for (i = 0 ; i < VFE40_LINEARIZATON_TABLE_LENGTH ; i++) {
+		msm_camera_io_w(*tbl,
+			vfe40_ctrl->share_ctrl->vfebase + VFE_DMI_DATA_LO);
+		tbl++;
+	}
+	CDBG("done writing to linearization table\n");
+	vfe40_program_dmi_cfg(NO_MEM_SELECTED, vfe40_ctrl);
+}
+
+void vfe40_send_isp_msg(
+	struct v4l2_subdev *sd,
+	uint32_t vfeFrameId,
+	uint32_t isp_msg_id)
+{
+	struct isp_msg_event isp_msg_evt;
+
+	isp_msg_evt.msg_id = isp_msg_id;
+	isp_msg_evt.sof_count = vfeFrameId;
+	v4l2_subdev_notify(sd,
+			NOTIFY_ISP_MSG_EVT,
+			(void *)&isp_msg_evt);
+}
+
+static int vfe40_proc_general(
+	struct msm_cam_media_controller *pmctl,
+	struct msm_isp_cmd *cmd,
+	struct vfe40_ctrl_type *vfe40_ctrl)
+{
+	int i , rc = 0;
+	uint32_t old_val = 0 , new_val = 0;
+	uint32_t *cmdp = NULL;
+	uint32_t *cmdp_local = NULL;
+	uint32_t snapshot_cnt = 0;
+	uint32_t temp1 = 0, temp2 = 0;
+
+	CDBG("vfe40_proc_general: cmdID = %s, length = %d\n",
+		vfe40_general_cmd[cmd->id], cmd->length);
+	switch (cmd->id) {
+	case VFE_CMD_RESET:
+		CDBG("vfe40_proc_general: cmdID = %s\n",
+			vfe40_general_cmd[cmd->id]);
+		vfe40_reset(vfe40_ctrl);
+		break;
+	case VFE_CMD_START:
+		CDBG("vfe40_proc_general: cmdID = %s\n",
+			vfe40_general_cmd[cmd->id]);
+		if ((vfe40_ctrl->share_ctrl->operation_mode ==
+				VFE_OUTPUTS_PREVIEW_AND_VIDEO) ||
+				(vfe40_ctrl->share_ctrl->operation_mode ==
+				VFE_OUTPUTS_PREVIEW))
+			/* Configure primary channel */
+			rc = vfe40_configure_pingpong_buffers(
+				VFE_MSG_START, VFE_MSG_OUTPUT_PRIMARY,
+				vfe40_ctrl);
+		else
+			/* Configure secondary channel */
+			rc = vfe40_configure_pingpong_buffers(
+				VFE_MSG_START, VFE_MSG_OUTPUT_SECONDARY,
+				vfe40_ctrl);
+		if (rc < 0) {
+			pr_err(
+				"%s error configuring pingpong buffers for preview",
+				__func__);
+			rc = -EINVAL;
+			goto proc_general_done;
+		}
+
+		rc = vfe40_start(pmctl, vfe40_ctrl);
+		break;
+	case VFE_CMD_UPDATE:
+		vfe40_update(vfe40_ctrl);
+		break;
+	case VFE_CMD_CAPTURE_RAW:
+		CDBG("%s: cmdID = VFE_CMD_CAPTURE_RAW\n", __func__);
+		if (copy_from_user(&snapshot_cnt, (void __user *)(cmd->value),
+				sizeof(uint32_t))) {
+			rc = -EFAULT;
+			goto proc_general_done;
+		}
+		rc = vfe40_configure_pingpong_buffers(
+			VFE_MSG_CAPTURE, VFE_MSG_OUTPUT_PRIMARY,
+			vfe40_ctrl);
+		if (rc < 0) {
+			pr_err(
+				"%s error configuring pingpong buffers for snapshot",
+				__func__);
+			rc = -EINVAL;
+			goto proc_general_done;
+		}
+		rc = vfe40_capture_raw(pmctl, vfe40_ctrl, snapshot_cnt);
+		break;
+	case VFE_CMD_CAPTURE:
+		if (copy_from_user(&snapshot_cnt, (void __user *)(cmd->value),
+				sizeof(uint32_t))) {
+			rc = -EFAULT;
+			goto proc_general_done;
+		}
+
+		if (vfe40_ctrl->share_ctrl->operation_mode ==
+			VFE_OUTPUTS_JPEG_AND_THUMB ||
+		vfe40_ctrl->share_ctrl->operation_mode ==
+			VFE_OUTPUTS_THUMB_AND_JPEG) {
+			if (snapshot_cnt != 1) {
+				pr_err("only support 1 inline snapshot\n");
+				rc = -EINVAL;
+				goto proc_general_done;
+			}
+			/* Configure primary channel for JPEG */
+			rc = vfe40_configure_pingpong_buffers(
+				VFE_MSG_JPEG_CAPTURE,
+				VFE_MSG_OUTPUT_PRIMARY,
+				vfe40_ctrl);
+		} else {
+			/* Configure primary channel */
+			rc = vfe40_configure_pingpong_buffers(
+				VFE_MSG_CAPTURE,
+				VFE_MSG_OUTPUT_PRIMARY,
+				vfe40_ctrl);
+		}
+		if (rc < 0) {
+			pr_err(
+			"%s error configuring pingpong buffers for primary output",
+			__func__);
+			rc = -EINVAL;
+			goto proc_general_done;
+		}
+		/* Configure secondary channel */
+		rc = vfe40_configure_pingpong_buffers(
+				VFE_MSG_CAPTURE, VFE_MSG_OUTPUT_SECONDARY,
+				vfe40_ctrl);
+		if (rc < 0) {
+			pr_err(
+			"%s error configuring pingpong buffers for secondary output",
+			__func__);
+			rc = -EINVAL;
+			goto proc_general_done;
+		}
+		rc = vfe40_capture(pmctl, snapshot_cnt, vfe40_ctrl);
+		break;
+	case VFE_CMD_START_RECORDING:
+		CDBG("vfe40_proc_general: cmdID = %s\n",
+			vfe40_general_cmd[cmd->id]);
+		if (vfe40_ctrl->share_ctrl->operation_mode ==
+			VFE_OUTPUTS_PREVIEW_AND_VIDEO)
+			rc = vfe40_configure_pingpong_buffers(
+				VFE_MSG_START_RECORDING,
+				VFE_MSG_OUTPUT_SECONDARY,
+				vfe40_ctrl);
+		else if (vfe40_ctrl->share_ctrl->operation_mode ==
+			VFE_OUTPUTS_VIDEO_AND_PREVIEW)
+			rc = vfe40_configure_pingpong_buffers(
+				VFE_MSG_START_RECORDING,
+				VFE_MSG_OUTPUT_PRIMARY,
+				vfe40_ctrl);
+		if (rc < 0) {
+			pr_err(
+				"%s error configuring pingpong buffers for video\n",
+				__func__);
+			rc = -EINVAL;
+			goto proc_general_done;
+		}
+		rc = vfe40_start_recording(pmctl, vfe40_ctrl);
+		break;
+	case VFE_CMD_STOP_RECORDING:
+		CDBG("vfe40_proc_general: cmdID = %s\n",
+			vfe40_general_cmd[cmd->id]);
+		rc = vfe40_stop_recording(pmctl, vfe40_ctrl);
+		break;
+	case VFE_CMD_OPERATION_CFG: {
+		if (cmd->length != V40_OPERATION_CFG_LEN) {
+			rc = -EINVAL;
+			goto proc_general_done;
+		}
+		cmdp = kmalloc(V40_OPERATION_CFG_LEN, GFP_ATOMIC);
+		if (!cmdp) {
+			rc = -ENOMEM;
+			goto proc_general_done;
+		}
+		if (copy_from_user(cmdp,
+			(void __user *)(cmd->value),
+			V40_OPERATION_CFG_LEN)) {
+			rc = -EFAULT;
+			goto proc_general_done;
+		}
+		rc = vfe40_operation_config(cmdp, vfe40_ctrl);
+		}
+		break;
+
+	case VFE_CMD_STATS_AE_START: {
+		rc = vfe_stats_aec_buf_init(vfe40_ctrl, NULL);
+		if (rc < 0) {
+			pr_err("%s: cannot config ping/pong address of AEC",
+				 __func__);
+			goto proc_general_done;
+		}
+		cmdp = kmalloc(cmd->length, GFP_ATOMIC);
+		if (!cmdp) {
+			rc = -ENOMEM;
+			goto proc_general_done;
+		}
+		if (copy_from_user(cmdp,
+			(void __user *)(cmd->value),
+			cmd->length)) {
+			rc = -EFAULT;
+			goto proc_general_done;
+		}
+		old_val = msm_camera_io_r(
+			vfe40_ctrl->share_ctrl->vfebase + VFE_MODULE_CFG);
+		old_val |= BG_ENABLE_MASK;
+		msm_camera_io_w(old_val,
+			vfe40_ctrl->share_ctrl->vfebase + VFE_MODULE_CFG);
+		msm_camera_io_memcpy(
+			vfe40_ctrl->share_ctrl->vfebase +
+			vfe40_cmd[cmd->id].offset,
+			cmdp, (vfe40_cmd[cmd->id].length));
+		}
+		break;
+	case VFE_CMD_STATS_AF_START: {
+		rc = vfe_stats_af_buf_init(vfe40_ctrl, NULL);
+		if (rc < 0) {
+			pr_err("%s: cannot config ping/pong address of AF",
+				__func__);
+			goto proc_general_done;
+		}
+		cmdp = kmalloc(cmd->length, GFP_ATOMIC);
+		if (!cmdp) {
+			rc = -ENOMEM;
+			goto proc_general_done;
+		}
+		if (copy_from_user(cmdp,
+			(void __user *)(cmd->value),
+			cmd->length)) {
+			rc = -EFAULT;
+			goto proc_general_done;
+		}
+		old_val = msm_camera_io_r(vfe40_ctrl->share_ctrl->vfebase +
+			VFE_MODULE_CFG);
+		old_val |= BF_ENABLE_MASK;
+		msm_camera_io_w(old_val,
+			vfe40_ctrl->share_ctrl->vfebase + VFE_MODULE_CFG);
+		msm_camera_io_memcpy(
+			vfe40_ctrl->share_ctrl->vfebase +
+			vfe40_cmd[cmd->id].offset,
+			cmdp, (vfe40_cmd[cmd->id].length));
+		}
+		break;
+
+	case VFE_CMD_STATS_AWB_START: {
+		rc = vfe_stats_awb_buf_init(vfe40_ctrl, NULL);
+		if (rc < 0) {
+			pr_err("%s: cannot config ping/pong address of AWB",
+				 __func__);
+			goto proc_general_done;
+		}
+		cmdp = kmalloc(cmd->length, GFP_ATOMIC);
+		if (!cmdp) {
+			rc = -ENOMEM;
+			goto proc_general_done;
+		}
+		if (copy_from_user(cmdp,
+			(void __user *)(cmd->value),
+			cmd->length)) {
+			rc = -EFAULT;
+			goto proc_general_done;
+		}
+		old_val = msm_camera_io_r(
+			vfe40_ctrl->share_ctrl->vfebase + VFE_MODULE_CFG);
+		old_val |= AWB_ENABLE_MASK;
+		msm_camera_io_w(old_val,
+			vfe40_ctrl->share_ctrl->vfebase + VFE_MODULE_CFG);
+		msm_camera_io_memcpy(
+			vfe40_ctrl->share_ctrl->vfebase +
+			vfe40_cmd[cmd->id].offset,
+			cmdp, (vfe40_cmd[cmd->id].length));
+		}
+		break;
+
+	case VFE_CMD_STATS_IHIST_START: {
+		rc = vfe_stats_ihist_buf_init(vfe40_ctrl, NULL);
+		if (rc < 0) {
+			pr_err("%s: cannot config ping/pong address of IHIST",
+				 __func__);
+			goto proc_general_done;
+		}
+		cmdp = kmalloc(cmd->length, GFP_ATOMIC);
+		if (!cmdp) {
+			rc = -ENOMEM;
+			goto proc_general_done;
+		}
+		if (copy_from_user(cmdp,
+			(void __user *)(cmd->value),
+			cmd->length)) {
+			rc = -EFAULT;
+			goto proc_general_done;
+		}
+		old_val = msm_camera_io_r(
+			vfe40_ctrl->share_ctrl->vfebase + VFE_MODULE_CFG);
+		old_val |= IHIST_ENABLE_MASK;
+		msm_camera_io_w(old_val,
+			vfe40_ctrl->share_ctrl->vfebase + VFE_MODULE_CFG);
+		msm_camera_io_memcpy(
+			vfe40_ctrl->share_ctrl->vfebase +
+			vfe40_cmd[cmd->id].offset,
+			cmdp, (vfe40_cmd[cmd->id].length));
+		}
+		break;
+
+
+	case VFE_CMD_STATS_RS_START: {
+		rc = vfe_stats_rs_buf_init(vfe40_ctrl, NULL);
+		if (rc < 0) {
+			pr_err("%s: cannot config ping/pong address of RS",
+				__func__);
+			goto proc_general_done;
+		}
+		cmdp = kmalloc(cmd->length, GFP_ATOMIC);
+		if (!cmdp) {
+			rc = -ENOMEM;
+			goto proc_general_done;
+		}
+		if (copy_from_user(cmdp,
+			(void __user *)(cmd->value),
+			cmd->length)) {
+			rc = -EFAULT;
+			goto proc_general_done;
+		}
+		msm_camera_io_memcpy(
+			vfe40_ctrl->share_ctrl->vfebase +
+			vfe40_cmd[cmd->id].offset,
+			cmdp, (vfe40_cmd[cmd->id].length));
+		}
+		break;
+
+	case VFE_CMD_STATS_CS_START: {
+		rc = vfe_stats_cs_buf_init(vfe40_ctrl, NULL);
+		if (rc < 0) {
+			pr_err("%s: cannot config ping/pong address of CS",
+				__func__);
+			goto proc_general_done;
+		}
+		cmdp = kmalloc(cmd->length, GFP_ATOMIC);
+		if (!cmdp) {
+			rc = -ENOMEM;
+			goto proc_general_done;
+		}
+		if (copy_from_user(cmdp,
+			(void __user *)(cmd->value),
+			cmd->length)) {
+			rc = -EFAULT;
+			goto proc_general_done;
+		}
+		msm_camera_io_memcpy(
+			vfe40_ctrl->share_ctrl->vfebase +
+			vfe40_cmd[cmd->id].offset,
+			cmdp, (vfe40_cmd[cmd->id].length));
+		}
+		break;
+
+	case VFE_CMD_MCE_UPDATE:
+	case VFE_CMD_MCE_CFG:{
+		cmdp = kmalloc(cmd->length, GFP_ATOMIC);
+		/* Incrementing with 4 so as to point to the 2nd register,
+		 * which has the mce_enable bit
+		 */
+		old_val = msm_camera_io_r(vfe40_ctrl->share_ctrl->vfebase +
+			V40_CHROMA_SUP_OFF + 4);
+		if (!cmdp) {
+			rc = -ENOMEM;
+			goto proc_general_done;
+		}
+		if (copy_from_user(cmdp,
+			(void __user *)(cmd->value),
+			cmd->length)) {
+			rc = -EFAULT;
+			goto proc_general_done;
+		}
+		cmdp_local = cmdp;
+		new_val = *cmdp_local;
+		old_val &= MCE_EN_MASK;
+		new_val = new_val | old_val;
+		msm_camera_io_memcpy(
+			vfe40_ctrl->share_ctrl->vfebase +
+			V40_CHROMA_SUP_OFF + 4, &new_val, 4);
+		cmdp_local += 1;
+
+		old_val = msm_camera_io_r(vfe40_ctrl->share_ctrl->vfebase +
+			V40_CHROMA_SUP_OFF + 8);
+		new_val = *cmdp_local;
+		old_val &= MCE_Q_K_MASK;
+		new_val = new_val | old_val;
+		msm_camera_io_memcpy(
+			vfe40_ctrl->share_ctrl->vfebase +
+			V40_CHROMA_SUP_OFF + 8, &new_val, 4);
+		cmdp_local += 1;
+		msm_camera_io_memcpy(
+			vfe40_ctrl->share_ctrl->vfebase +
+			vfe40_cmd[cmd->id].offset,
+			cmdp_local, (vfe40_cmd[cmd->id].length));
+		}
+		break;
+	case VFE_CMD_CHROMA_SUP_UPDATE:
+	case VFE_CMD_CHROMA_SUP_CFG:{
+		cmdp = kmalloc(cmd->length, GFP_ATOMIC);
+		if (!cmdp) {
+			rc = -ENOMEM;
+			goto proc_general_done;
+		}
+		if (copy_from_user(cmdp,
+			(void __user *)(cmd->value),
+			cmd->length)) {
+			rc = -EFAULT;
+			goto proc_general_done;
+		}
+		cmdp_local = cmdp;
+		msm_camera_io_memcpy(vfe40_ctrl->share_ctrl->vfebase +
+			V40_CHROMA_SUP_OFF, cmdp_local, 4);
+
+		cmdp_local += 1;
+		new_val = *cmdp_local;
+		/* Incrementing with 4 so as to point to the 2nd Register as
+		 * the 2nd register has the mce_enable bit
+		 */
+		old_val = msm_camera_io_r(vfe40_ctrl->share_ctrl->vfebase +
+			V40_CHROMA_SUP_OFF + 4);
+		old_val &= ~MCE_EN_MASK;
+		new_val = new_val | old_val;
+		msm_camera_io_memcpy(
+			vfe40_ctrl->share_ctrl->vfebase +
+			V40_CHROMA_SUP_OFF + 4, &new_val, 4);
+		cmdp_local += 1;
+
+		old_val = msm_camera_io_r(vfe40_ctrl->share_ctrl->vfebase +
+			V40_CHROMA_SUP_OFF + 8);
+		new_val = *cmdp_local;
+		old_val &= ~MCE_Q_K_MASK;
+		new_val = new_val | old_val;
+		msm_camera_io_memcpy(
+			vfe40_ctrl->share_ctrl->vfebase +
+			V40_CHROMA_SUP_OFF + 8, &new_val, 4);
+		}
+		break;
+	case VFE_CMD_BLACK_LEVEL_CFG:
+		rc = -EFAULT;
+		goto proc_general_done;
+
+	case VFE_CMD_LA_CFG:
+		cmdp = kmalloc(cmd->length, GFP_ATOMIC);
+		if (!cmdp) {
+			rc = -ENOMEM;
+			goto proc_general_done;
+		}
+		if (copy_from_user(cmdp,
+			(void __user *)(cmd->value),
+			cmd->length)) {
+			rc = -EFAULT;
+			goto proc_general_done;
+		}
+		cmdp_local = cmdp;
+		msm_camera_io_memcpy(
+			vfe40_ctrl->share_ctrl->vfebase +
+			vfe40_cmd[cmd->id].offset,
+			cmdp_local, (vfe40_cmd[cmd->id].length));
+
+		cmdp_local += 1;
+		vfe40_write_la_cfg(LUMA_ADAPT_LUT_RAM_BANK0,
+						   cmdp_local, vfe40_ctrl);
+		break;
+
+	case VFE_CMD_LA_UPDATE: {
+		cmdp = kmalloc(cmd->length, GFP_ATOMIC);
+		if (!cmdp) {
+			rc = -ENOMEM;
+			goto proc_general_done;
+		}
+		if (copy_from_user(cmdp,
+			(void __user *)(cmd->value),
+			cmd->length)) {
+			rc = -EFAULT;
+			goto proc_general_done;
+		}
+
+		cmdp_local = cmdp + 1;
+		old_val = msm_camera_io_r(
+			vfe40_ctrl->share_ctrl->vfebase + V40_LA_OFF);
+		if (old_val != 0x0)
+			vfe40_write_la_cfg(LUMA_ADAPT_LUT_RAM_BANK0,
+				cmdp_local, vfe40_ctrl);
+		else
+			vfe40_write_la_cfg(LUMA_ADAPT_LUT_RAM_BANK1,
+				cmdp_local, vfe40_ctrl);
+		}
+		vfe40_ctrl->update_la = true;
+		break;
+
+	case VFE_CMD_GET_LA_TABLE:
+		temp1 = sizeof(uint32_t) * VFE40_LA_TABLE_LENGTH / 2;
+		if (cmd->length != temp1) {
+			rc = -EINVAL;
+			goto proc_general_done;
+		}
+		cmdp = kzalloc(temp1, GFP_KERNEL);
+		if (!cmdp) {
+			rc = -ENOMEM;
+			goto proc_general_done;
+		}
+		cmdp_local = cmdp;
+		if (msm_camera_io_r(vfe40_ctrl->
+				share_ctrl->vfebase + V40_LA_OFF))
+			vfe40_program_dmi_cfg(LUMA_ADAPT_LUT_RAM_BANK1,
+						vfe40_ctrl);
+		else
+			vfe40_program_dmi_cfg(LUMA_ADAPT_LUT_RAM_BANK0,
+						vfe40_ctrl);
+		for (i = 0 ; i < (VFE40_LA_TABLE_LENGTH / 2) ; i++) {
+			*cmdp_local =
+				msm_camera_io_r(
+					vfe40_ctrl->share_ctrl->vfebase +
+					VFE_DMI_DATA_LO);
+			*cmdp_local |= (msm_camera_io_r(
+				vfe40_ctrl->share_ctrl->vfebase +
+				VFE_DMI_DATA_LO)) << 16;
+			cmdp_local++;
+		}
+		vfe40_program_dmi_cfg(NO_MEM_SELECTED, vfe40_ctrl);
+		if (copy_to_user((void __user *)(cmd->value), cmdp,
+			temp1)) {
+			rc = -EFAULT;
+			goto proc_general_done;
+		}
+		break;
+	case VFE_CMD_SK_ENHAN_CFG:
+	case VFE_CMD_SK_ENHAN_UPDATE:{
+		cmdp = kmalloc(cmd->length, GFP_ATOMIC);
+		if (!cmdp) {
+			rc = -ENOMEM;
+			goto proc_general_done;
+		}
+		if (copy_from_user(cmdp,
+			(void __user *)(cmd->value),
+			cmd->length)) {
+			rc = -EFAULT;
+			goto proc_general_done;
+		}
+		msm_camera_io_memcpy(
+			vfe40_ctrl->share_ctrl->vfebase + V40_SCE_OFF,
+			cmdp, V40_SCE_LEN);
+		}
+		break;
+
+	case VFE_CMD_LIVESHOT:
+		/* Configure primary channel */
+		rc = vfe40_configure_pingpong_buffers(VFE_MSG_CAPTURE,
+					VFE_MSG_OUTPUT_PRIMARY, vfe40_ctrl);
+		if (rc < 0) {
+			pr_err(
+			"%s error configuring pingpong buffers for primary output\n",
+			__func__);
+			rc = -EINVAL;
+			goto proc_general_done;
+		}
+		vfe40_start_liveshot(pmctl, vfe40_ctrl);
+		break;
+
+	case VFE_CMD_LINEARIZATION_CFG:
+		cmdp = kmalloc(cmd->length, GFP_ATOMIC);
+		if (!cmdp) {
+			rc = -ENOMEM;
+			goto proc_general_done;
+		}
+		if (copy_from_user(cmdp, (void __user *)(cmd->value),
+			cmd->length)) {
+			rc = -EFAULT;
+			goto proc_general_done;
+		}
+		cmdp_local = cmdp;
+		msm_camera_io_memcpy(
+			vfe40_ctrl->share_ctrl->vfebase +
+			V40_LINEARIZATION_OFF1,
+			cmdp_local, V40_LINEARIZATION_LEN1);
+
+		cmdp_local = cmdp + 17;
+		vfe40_write_linear_cfg(BLACK_LUT_RAM_BANK0,
+					cmdp_local, vfe40_ctrl);
+		break;
+
+	case VFE_CMD_LINEARIZATION_UPDATE:
+		cmdp = kmalloc(cmd->length, GFP_ATOMIC);
+		if (!cmdp) {
+			rc = -ENOMEM;
+			goto proc_general_done;
+		}
+		if (copy_from_user(cmdp, (void __user *)(cmd->value),
+			cmd->length)) {
+			rc = -EFAULT;
+			goto proc_general_done;
+		}
+		cmdp_local = cmdp;
+		cmdp_local++;
+		msm_camera_io_memcpy(
+			vfe40_ctrl->share_ctrl->vfebase +
+			V40_LINEARIZATION_OFF1 + 4,
+			cmdp_local, (V40_LINEARIZATION_LEN1 - 4));
+		cmdp_local = cmdp + 17;
+		/*extracting the bank select*/
+		old_val = msm_camera_io_r(
+				vfe40_ctrl->share_ctrl->vfebase +
+				V40_LINEARIZATION_OFF1);
+
+		if (old_val != 0x0)
+			vfe40_write_linear_cfg(BLACK_LUT_RAM_BANK0,
+						cmdp_local, vfe40_ctrl);
+		else
+			vfe40_write_linear_cfg(BLACK_LUT_RAM_BANK1,
+						cmdp_local, vfe40_ctrl);
+		vfe40_ctrl->update_linear = true;
+		break;
+
+	case VFE_CMD_GET_LINEARIZATON_TABLE:
+		temp1 = sizeof(uint32_t) * VFE40_LINEARIZATON_TABLE_LENGTH;
+		if (cmd->length != temp1) {
+			rc = -EINVAL;
+			goto proc_general_done;
+		}
+		cmdp = kzalloc(temp1, GFP_KERNEL);
+		if (!cmdp) {
+			rc = -ENOMEM;
+			goto proc_general_done;
+		}
+		cmdp_local = cmdp;
+		if (msm_camera_io_r(
+			vfe40_ctrl->share_ctrl->vfebase +
+			V40_LINEARIZATION_OFF1))
+			vfe40_program_dmi_cfg(BLACK_LUT_RAM_BANK1, vfe40_ctrl);
+		else
+			vfe40_program_dmi_cfg(BLACK_LUT_RAM_BANK0, vfe40_ctrl);
+		CDBG("%s: Linearization Table\n", __func__);
+		for (i = 0 ; i < VFE40_LINEARIZATON_TABLE_LENGTH ; i++) {
+			*cmdp_local = msm_camera_io_r(
+				vfe40_ctrl->share_ctrl->vfebase +
+				VFE_DMI_DATA_LO);
+			CDBG("%s: %08x\n", __func__, *cmdp_local);
+			cmdp_local++;
+		}
+		vfe40_program_dmi_cfg(NO_MEM_SELECTED, vfe40_ctrl);
+		if (copy_to_user((void __user *)(cmd->value), cmdp,
+			temp1)) {
+			rc = -EFAULT;
+			goto proc_general_done;
+		}
+		break;
+	case VFE_CMD_DEMOSAICV3:
+		if (cmd->length !=
+			V40_DEMOSAICV3_0_LEN+V40_DEMOSAICV3_1_LEN) {
+			rc = -EFAULT;
+			goto proc_general_done;
+		}
+		cmdp = kmalloc(cmd->length, GFP_ATOMIC);
+		if (!cmdp) {
+			rc = -ENOMEM;
+			goto proc_general_done;
+		}
+		if (copy_from_user(cmdp,
+			(void __user *)(cmd->value),
+			cmd->length)) {
+			rc = -EFAULT;
+			goto proc_general_done;
+		}
+		cmdp_local = cmdp;
+		new_val = *cmdp_local;
+
+		old_val = msm_camera_io_r(
+			vfe40_ctrl->share_ctrl->vfebase + V40_DEMOSAICV3_0_OFF);
+		old_val &= DEMOSAIC_MASK;
+		new_val = new_val | old_val;
+		*cmdp_local = new_val;
+
+		msm_camera_io_memcpy(
+			vfe40_ctrl->share_ctrl->vfebase + V40_DEMOSAICV3_0_OFF,
+			cmdp_local, V40_DEMOSAICV3_0_LEN);
+		cmdp_local += 1;
+		msm_camera_io_memcpy(
+			vfe40_ctrl->share_ctrl->vfebase + V40_DEMOSAICV3_1_OFF,
+			cmdp_local, V40_DEMOSAICV3_1_LEN);
+		break;
+
+	case VFE_CMD_DEMOSAICV3_UPDATE:
+		if (cmd->length !=
+			V40_DEMOSAICV3_0_LEN * V40_DEMOSAICV3_UP_REG_CNT) {
+			rc = -EFAULT;
+			goto proc_general_done;
+		}
+		cmdp = kmalloc(cmd->length, GFP_ATOMIC);
+		if (!cmdp) {
+			rc = -ENOMEM;
+			goto proc_general_done;
+		}
+		if (copy_from_user(cmdp,
+			(void __user *)(cmd->value),
+			cmd->length)) {
+			rc = -EFAULT;
+			goto proc_general_done;
+		}
+		cmdp_local = cmdp;
+		new_val = *cmdp_local;
+
+		old_val = msm_camera_io_r(
+			vfe40_ctrl->share_ctrl->vfebase + V40_DEMOSAICV3_0_OFF);
+		old_val &= DEMOSAIC_MASK;
+		new_val = new_val | old_val;
+		*cmdp_local = new_val;
+
+		msm_camera_io_memcpy(
+			vfe40_ctrl->share_ctrl->vfebase + V40_DEMOSAICV3_0_OFF,
+			cmdp_local, V40_DEMOSAICV3_0_LEN);
+		/* As the address space is not contiguous, increment by 1
+		 * before copying to the next address space */
+		cmdp_local += 1;
+		msm_camera_io_memcpy(
+			vfe40_ctrl->share_ctrl->vfebase + V40_DEMOSAICV3_1_OFF,
+			cmdp_local, 2 * V40_DEMOSAICV3_0_LEN);
+		/* As the address space is not contiguous increment by 2
+		 * before copying to next address space */
+		cmdp_local += 2;
+		msm_camera_io_memcpy(
+			vfe40_ctrl->share_ctrl->vfebase + V40_DEMOSAICV3_2_OFF,
+			cmdp_local, 2 * V40_DEMOSAICV3_0_LEN);
+		break;
+
+	case VFE_CMD_DEMOSAICV3_ABCC_CFG:
+		rc = -EFAULT;
+		break;
+
+	case VFE_CMD_DEMOSAICV3_ABF_UPDATE:/* 116 ABF update  */
+	case VFE_CMD_DEMOSAICV3_ABF_CFG: { /* 108 ABF config  */
+		cmdp = kmalloc(cmd->length, GFP_ATOMIC);
+		if (!cmdp) {
+			rc = -ENOMEM;
+			goto proc_general_done;
+		}
+		if (copy_from_user(cmdp,
+			(void __user *)(cmd->value),
+			cmd->length)) {
+			rc = -EFAULT;
+			goto proc_general_done;
+		}
+		cmdp_local = cmdp;
+		new_val = *cmdp_local;
+
+		old_val = msm_camera_io_r(
+			vfe40_ctrl->share_ctrl->vfebase + V40_DEMOSAICV3_0_OFF);
+		old_val &= ABF_MASK;
+		new_val = new_val | old_val;
+		*cmdp_local = new_val;
+
+		msm_camera_io_memcpy(
+			vfe40_ctrl->share_ctrl->vfebase + V40_DEMOSAICV3_0_OFF,
+			cmdp_local, 4);
+
+		cmdp_local += 1;
+		msm_camera_io_memcpy(
+			vfe40_ctrl->share_ctrl->vfebase +
+			vfe40_cmd[cmd->id].offset,
+			cmdp_local, (vfe40_cmd[cmd->id].length));
+		}
+		break;
+
+	case VFE_CMD_DEMOSAICV3_DBCC_CFG:
+	case VFE_CMD_DEMOSAICV3_DBCC_UPDATE:
+		cmdp = kmalloc(cmd->length, GFP_ATOMIC);
+		if (!cmdp) {
+			rc = -ENOMEM;
+			goto proc_general_done;
+		}
+		if (copy_from_user(cmdp,
+			(void __user *)(cmd->value),
+			cmd->length)) {
+			rc = -EFAULT;
+			goto proc_general_done;
+		}
+		cmdp_local = cmdp;
+		new_val = *cmdp_local;
+
+		old_val = msm_camera_io_r(
+			vfe40_ctrl->share_ctrl->vfebase + V40_DEMOSAICV3_0_OFF);
+		old_val &= DBCC_MASK;
+
+		new_val = new_val | old_val;
+		*cmdp_local = new_val;
+		msm_camera_io_memcpy(
+			vfe40_ctrl->share_ctrl->vfebase + V40_DEMOSAICV3_0_OFF,
+			cmdp_local, 4);
+		cmdp_local += 1;
+		msm_camera_io_memcpy(
+			vfe40_ctrl->share_ctrl->vfebase +
+			vfe40_cmd[cmd->id].offset,
+			cmdp_local, (vfe40_cmd[cmd->id].length));
+		break;
+
+	case VFE_CMD_DEMOSAICV3_DBPC_CFG:
+	case VFE_CMD_DEMOSAICV3_DBPC_UPDATE:
+		cmdp = kmalloc(cmd->length, GFP_ATOMIC);
+		if (!cmdp) {
+			rc = -ENOMEM;
+			goto proc_general_done;
+		}
+		if (copy_from_user(cmdp,
+			(void __user *)(cmd->value),
+			cmd->length)) {
+			rc = -EFAULT;
+			goto proc_general_done;
+		}
+		cmdp_local = cmdp;
+		new_val = *cmdp_local;
+
+		old_val = msm_camera_io_r(
+			vfe40_ctrl->share_ctrl->vfebase + V40_DEMOSAICV3_0_OFF);
+		old_val &= DBPC_MASK;
+
+		new_val = new_val | old_val;
+		*cmdp_local = new_val;
+		msm_camera_io_memcpy(vfe40_ctrl->share_ctrl->vfebase +
+			V40_DEMOSAICV3_0_OFF,
+			cmdp_local, V40_DEMOSAICV3_0_LEN);
+		cmdp_local += 1;
+		msm_camera_io_memcpy(vfe40_ctrl->share_ctrl->vfebase +
+			V40_DEMOSAICV3_DBPC_CFG_OFF,
+			cmdp_local, V40_DEMOSAICV3_DBPC_LEN);
+		cmdp_local += 1;
+		msm_camera_io_memcpy(vfe40_ctrl->share_ctrl->vfebase +
+			V40_DEMOSAICV3_DBPC_CFG_OFF0,
+			cmdp_local, V40_DEMOSAICV3_DBPC_LEN);
+		cmdp_local += 1;
+		msm_camera_io_memcpy(vfe40_ctrl->share_ctrl->vfebase +
+			V40_DEMOSAICV3_DBPC_CFG_OFF1,
+			cmdp_local, V40_DEMOSAICV3_DBPC_LEN);
+		cmdp_local += 1;
+		msm_camera_io_memcpy(vfe40_ctrl->share_ctrl->vfebase +
+			V40_DEMOSAICV3_DBPC_CFG_OFF2,
+			cmdp_local, V40_DEMOSAICV3_DBPC_LEN);
+		break;
+
+	case VFE_CMD_RGB_G_CFG: {
+		cmdp = kmalloc(cmd->length, GFP_ATOMIC);
+		if (!cmdp) {
+			rc = -ENOMEM;
+			goto proc_general_done;
+		}
+		if (copy_from_user(cmdp,
+			(void __user *)(cmd->value),
+			cmd->length)) {
+			rc = -EFAULT;
+			goto proc_general_done;
+		}
+		msm_camera_io_memcpy(
+			vfe40_ctrl->share_ctrl->vfebase + V40_RGB_G_OFF,
+			cmdp, 4);
+		cmdp += 1;
+
+		vfe40_write_gamma_cfg(RGBLUT_RAM_CH0_BANK0, cmdp, vfe40_ctrl);
+		vfe40_write_gamma_cfg(RGBLUT_RAM_CH1_BANK0, cmdp, vfe40_ctrl);
+		vfe40_write_gamma_cfg(RGBLUT_RAM_CH2_BANK0, cmdp, vfe40_ctrl);
+		}
+		cmdp -= 1;
+		break;
+
+	case VFE_CMD_RGB_G_UPDATE: {
+		cmdp = kmalloc(cmd->length, GFP_ATOMIC);
+		if (!cmdp) {
+			rc = -ENOMEM;
+			goto proc_general_done;
+		}
+		if (copy_from_user(cmdp, (void __user *)(cmd->value),
+			cmd->length)) {
+			rc = -EFAULT;
+			goto proc_general_done;
+		}
+
+		old_val = msm_camera_io_r(
+			vfe40_ctrl->share_ctrl->vfebase + V40_RGB_G_OFF);
+		cmdp += 1;
+		if (old_val != 0x0) {
+			vfe40_write_gamma_cfg(
+				RGBLUT_RAM_CH0_BANK0, cmdp, vfe40_ctrl);
+			vfe40_write_gamma_cfg(
+				RGBLUT_RAM_CH1_BANK0, cmdp, vfe40_ctrl);
+			vfe40_write_gamma_cfg(
+				RGBLUT_RAM_CH2_BANK0, cmdp, vfe40_ctrl);
+		} else {
+			vfe40_write_gamma_cfg(
+				RGBLUT_RAM_CH0_BANK1, cmdp, vfe40_ctrl);
+			vfe40_write_gamma_cfg(
+				RGBLUT_RAM_CH1_BANK1, cmdp, vfe40_ctrl);
+			vfe40_write_gamma_cfg(
+				RGBLUT_RAM_CH2_BANK1, cmdp, vfe40_ctrl);
+		}
+		}
+		vfe40_ctrl->update_gamma = TRUE;
+		cmdp -= 1;
+		break;
+
+	case VFE_CMD_GET_RGB_G_TABLE:
+		temp1 = sizeof(uint32_t) * VFE40_GAMMA_NUM_ENTRIES * 3;
+		if (cmd->length != temp1) {
+			rc = -EINVAL;
+			goto proc_general_done;
+		}
+		cmdp = kzalloc(temp1, GFP_KERNEL);
+		if (!cmdp) {
+			rc = -ENOMEM;
+			goto proc_general_done;
+		}
+		cmdp_local = cmdp;
+
+		old_val = msm_camera_io_r(
+			vfe40_ctrl->share_ctrl->vfebase + V40_RGB_G_OFF);
+		temp2 = old_val ? RGBLUT_RAM_CH0_BANK1 :
+			RGBLUT_RAM_CH0_BANK0;
+		for (i = 0; i < 3; i++) {
+			vfe40_read_gamma_cfg(temp2,
+				cmdp_local + (VFE40_GAMMA_NUM_ENTRIES * i),
+				vfe40_ctrl);
+			temp2 += 2;
+		}
+		if (copy_to_user((void __user *)(cmd->value), cmdp,
+			temp1)) {
+			rc = -EFAULT;
+			goto proc_general_done;
+		}
+		break;
+
+	case VFE_CMD_STATS_AWB_STOP: {
+		old_val = msm_camera_io_r(
+			vfe40_ctrl->share_ctrl->vfebase + VFE_MODULE_CFG);
+		old_val &= ~AWB_ENABLE_MASK;
+		msm_camera_io_w(old_val,
+			vfe40_ctrl->share_ctrl->vfebase + VFE_MODULE_CFG);
+		}
+		break;
+
+	case VFE_CMD_STATS_AE_STOP: {
+		old_val = msm_camera_io_r(
+			vfe40_ctrl->share_ctrl->vfebase + VFE_MODULE_CFG);
+		old_val &= ~BG_ENABLE_MASK;
+		msm_camera_io_w(old_val,
+			vfe40_ctrl->share_ctrl->vfebase + VFE_MODULE_CFG);
+		}
+		break;
+	case VFE_CMD_STATS_AF_STOP: {
+		old_val = msm_camera_io_r(
+			vfe40_ctrl->share_ctrl->vfebase + VFE_MODULE_CFG);
+		old_val &= ~BF_ENABLE_MASK;
+		msm_camera_io_w(old_val,
+			vfe40_ctrl->share_ctrl->vfebase + VFE_MODULE_CFG);
+		rc = vfe40_stats_flush_enqueue(vfe40_ctrl, MSM_STATS_TYPE_AF);
+		if (rc < 0) {
+			pr_err("%s: dq stats buf err = %d",
+				   __func__, rc);
+			return -EINVAL;
+		}
+		}
+		break;
+
+	case VFE_CMD_STATS_IHIST_STOP: {
+		old_val = msm_camera_io_r(
+			vfe40_ctrl->share_ctrl->vfebase + VFE_MODULE_CFG);
+		old_val &= ~IHIST_ENABLE_MASK;
+		msm_camera_io_w(old_val,
+			vfe40_ctrl->share_ctrl->vfebase + VFE_MODULE_CFG);
+		}
+		break;
+
+	case VFE_CMD_STATS_RS_STOP: {
+		old_val = msm_camera_io_r(
+			vfe40_ctrl->share_ctrl->vfebase + VFE_MODULE_CFG);
+		old_val &= ~RS_ENABLE_MASK;
+		msm_camera_io_w(old_val,
+			vfe40_ctrl->share_ctrl->vfebase + VFE_MODULE_CFG);
+		}
+		break;
+
+	case VFE_CMD_STATS_CS_STOP: {
+		old_val = msm_camera_io_r(
+			vfe40_ctrl->share_ctrl->vfebase + VFE_MODULE_CFG);
+		old_val &= ~CS_ENABLE_MASK;
+		msm_camera_io_w(old_val,
+			vfe40_ctrl->share_ctrl->vfebase + VFE_MODULE_CFG);
+		}
+		break;
+	case VFE_CMD_STOP:
+		CDBG("vfe40_proc_general: cmdID = %s\n",
+			vfe40_general_cmd[cmd->id]);
+		vfe40_stop(vfe40_ctrl);
+		break;
+
+	case VFE_CMD_SYNC_TIMER_SETTING:
+		cmdp = kmalloc(cmd->length, GFP_ATOMIC);
+		if (!cmdp) {
+			rc = -ENOMEM;
+			goto proc_general_done;
+		}
+		if (copy_from_user(cmdp, (void __user *)(cmd->value),
+			cmd->length)) {
+			rc = -EFAULT;
+			goto proc_general_done;
+		}
+		vfe40_sync_timer_start(cmdp, vfe40_ctrl);
+		break;
+
+	case VFE_CMD_MODULE_CFG: {
+		cmdp = kmalloc(cmd->length, GFP_ATOMIC);
+		if (!cmdp) {
+			rc = -ENOMEM;
+			goto proc_general_done;
+		}
+		if (copy_from_user(cmdp,
+			(void __user *)(cmd->value),
+			cmd->length)) {
+			rc = -EFAULT;
+			goto proc_general_done;
+		}
+		*cmdp &= ~STATS_ENABLE_MASK;
+		old_val = msm_camera_io_r(
+			vfe40_ctrl->share_ctrl->vfebase + VFE_MODULE_CFG);
+		old_val &= STATS_ENABLE_MASK;
+		*cmdp |= old_val;
+
+		msm_camera_io_memcpy(
+			vfe40_ctrl->share_ctrl->vfebase +
+			vfe40_cmd[cmd->id].offset,
+			cmdp, (vfe40_cmd[cmd->id].length));
+		}
+		break;
+
+	case VFE_CMD_ZSL:
+		rc = vfe40_configure_pingpong_buffers(VFE_MSG_START,
+			VFE_MSG_OUTPUT_PRIMARY, vfe40_ctrl);
+		if (rc < 0)
+			goto proc_general_done;
+		rc = vfe40_configure_pingpong_buffers(VFE_MSG_START,
+			VFE_MSG_OUTPUT_SECONDARY, vfe40_ctrl);
+		if (rc < 0)
+			goto proc_general_done;
+
+		rc = vfe40_zsl(pmctl, vfe40_ctrl);
+		break;
+
+	case VFE_CMD_ASF_CFG:
+	case VFE_CMD_ASF_UPDATE:
+		cmdp = kmalloc(cmd->length, GFP_ATOMIC);
+		if (!cmdp) {
+			rc = -ENOMEM;
+			goto proc_general_done;
+		}
+		if (copy_from_user(cmdp, (void __user *)(cmd->value),
+			cmd->length)) {
+			rc = -EFAULT;
+			goto proc_general_done;
+		}
+		msm_camera_io_memcpy(
+			vfe40_ctrl->share_ctrl->vfebase +
+			vfe40_cmd[cmd->id].offset,
+			cmdp, (vfe40_cmd[cmd->id].length));
+		cmdp_local = cmdp + V40_ASF_LEN/4;
+		msm_camera_io_memcpy(
+			vfe40_ctrl->share_ctrl->vfebase +
+			V40_ASF_SPECIAL_EFX_CFG_OFF,
+			cmdp_local, V40_ASF_SPECIAL_EFX_CFG_LEN);
+		break;
+
+	case VFE_CMD_GET_HW_VERSION:
+		if (cmd->length != V40_GET_HW_VERSION_LEN) {
+			rc = -EINVAL;
+			goto proc_general_done;
+		}
+		cmdp = kmalloc(V40_GET_HW_VERSION_LEN, GFP_ATOMIC);
+		if (!cmdp) {
+			rc = -ENOMEM;
+			goto proc_general_done;
+		}
+		*cmdp = msm_camera_io_r(
+			vfe40_ctrl->share_ctrl->vfebase+V40_GET_HW_VERSION_OFF);
+		if (copy_to_user((void __user *)(cmd->value), cmdp,
+			V40_GET_HW_VERSION_LEN)) {
+			rc = -EFAULT;
+			goto proc_general_done;
+		}
+		break;
+	case VFE_CMD_GET_REG_DUMP:
+		temp1 = sizeof(uint32_t) *
+			vfe40_ctrl->share_ctrl->register_total;
+		if (cmd->length != temp1) {
+			rc = -EINVAL;
+			goto proc_general_done;
+		}
+		cmdp = kmalloc(temp1, GFP_ATOMIC);
+		if (!cmdp) {
+			rc = -ENOMEM;
+			goto proc_general_done;
+		}
+		msm_camera_io_dump(vfe40_ctrl->share_ctrl->vfebase,
+			vfe40_ctrl->share_ctrl->register_total*4);
+		CDBG("%s: %p %p %d\n", __func__, (void *)cmdp,
+			vfe40_ctrl->share_ctrl->vfebase, temp1);
+		memcpy_fromio((void *)cmdp,
+			vfe40_ctrl->share_ctrl->vfebase, temp1);
+		if (copy_to_user((void __user *)(cmd->value), cmdp, temp1)) {
+			rc = -EFAULT;
+			goto proc_general_done;
+		}
+		break;
+	case VFE_CMD_FRAME_SKIP_CFG:
+		if (cmd->length != vfe40_cmd[cmd->id].length)
+			return -EINVAL;
+
+		cmdp = kmalloc(vfe40_cmd[cmd->id].length, GFP_ATOMIC);
+		if (!cmdp) {
+			rc = -ENOMEM;
+			goto proc_general_done;
+		}
+
+		if (copy_from_user((cmdp), (void __user *)cmd->value,
+				cmd->length)) {
+			rc = -EFAULT;
+			pr_err("%s copy from user failed for cmd %d",
+				__func__, cmd->id);
+			break;
+		}
+
+		msm_camera_io_memcpy(
+			vfe40_ctrl->share_ctrl->vfebase +
+			vfe40_cmd[cmd->id].offset,
+			cmdp, (vfe40_cmd[cmd->id].length));
+		vfe40_ctrl->frame_skip_cnt = ((uint32_t)
+			*cmdp & VFE_FRAME_SKIP_PERIOD_MASK) + 1;
+		vfe40_ctrl->frame_skip_pattern = (uint32_t)(*(cmdp + 2));
+		break;
+	default:
+		if (cmd->length != vfe40_cmd[cmd->id].length)
+			return -EINVAL;
+
+		cmdp = kmalloc(vfe40_cmd[cmd->id].length, GFP_ATOMIC);
+		if (!cmdp) {
+			rc = -ENOMEM;
+			goto proc_general_done;
+		}
+
+		if (copy_from_user((cmdp), (void __user *)cmd->value,
+				cmd->length)) {
+			rc = -EFAULT;
+			pr_err("%s copy from user failed for cmd %d",
+				__func__, cmd->id);
+			goto proc_general_done;
+		}
+		msm_camera_io_memcpy(
+			vfe40_ctrl->share_ctrl->vfebase +
+			vfe40_cmd[cmd->id].offset,
+			cmdp, (vfe40_cmd[cmd->id].length));
+		break;
+
+	}
+
+proc_general_done:
+	kfree(cmdp);
+
+	return rc;
+}
+
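+/* Latch IRQ status 0/1 and the CAMIF status, clear the pending bits and
+ * commit the clear with a barriered write to VFE_IRQ_CMD. */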
+static inline void vfe40_read_irq_status(
+	struct axi_ctrl_t *axi_ctrl, struct vfe40_irq_status *out)
+{
+	uint32_t *temp;
+	memset(out, 0, sizeof(struct vfe40_irq_status));
+	temp = (uint32_t *)(axi_ctrl->share_ctrl->vfebase + VFE_IRQ_STATUS_0);
+	out->vfeIrqStatus0 = msm_camera_io_r(temp);
+
+	temp = (uint32_t *)(axi_ctrl->share_ctrl->vfebase + VFE_IRQ_STATUS_1);
+	out->vfeIrqStatus1 = msm_camera_io_r(temp);
+
+	temp = (uint32_t *)(axi_ctrl->share_ctrl->vfebase + VFE_CAMIF_STATUS);
+	out->camifStatus = msm_camera_io_r(temp);
+	CDBG("camifStatus  = 0x%x\n", out->camifStatus);
+
+	/* clear the pending interrupt of the same kind.*/
+	msm_camera_io_w(out->vfeIrqStatus0,
+		axi_ctrl->share_ctrl->vfebase + VFE_IRQ_CLEAR_0);
+	msm_camera_io_w(out->vfeIrqStatus1,
+		axi_ctrl->share_ctrl->vfebase + VFE_IRQ_CLEAR_1);
+
+	/* Ensure the write order while writing
+	to the command register using the barrier */
+	msm_camera_io_w_mb(1, axi_ctrl->share_ctrl->vfebase + VFE_IRQ_CMD);
+
+}
+
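+/*
+ * Register-update IRQ handler: toggles the video write masters on
+ * recording start/stop requests, sends start/stop/update acks, advances
+ * the liveshot state machine and counts down snapshot frames.
+ */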
+static void vfe40_process_reg_update_irq(
+		struct vfe40_ctrl_type *vfe40_ctrl)
+{
+	unsigned long flags;
+
+	if (vfe40_ctrl->recording_state == VFE_STATE_START_REQUESTED) {
+		if (vfe40_ctrl->share_ctrl->operation_mode ==
+				VFE_OUTPUTS_VIDEO_AND_PREVIEW) {
+			msm_camera_io_w(1, vfe40_ctrl->share_ctrl->vfebase +
+				vfe40_AXI_WM_CFG[vfe40_ctrl->
+				share_ctrl->outpath.out0.ch0]);
+			msm_camera_io_w(1, vfe40_ctrl->share_ctrl->vfebase +
+				vfe40_AXI_WM_CFG[vfe40_ctrl->
+				share_ctrl->outpath.out0.ch1]);
+		} else if (vfe40_ctrl->share_ctrl->operation_mode ==
+				VFE_OUTPUTS_PREVIEW_AND_VIDEO) {
+			msm_camera_io_w(1, vfe40_ctrl->share_ctrl->vfebase +
+				vfe40_AXI_WM_CFG[vfe40_ctrl->
+				share_ctrl->outpath.out1.ch0]);
+			msm_camera_io_w(1, vfe40_ctrl->share_ctrl->vfebase +
+				vfe40_AXI_WM_CFG[vfe40_ctrl->
+				share_ctrl->outpath.out1.ch1]);
+		}
+		vfe40_ctrl->recording_state = VFE_STATE_STARTED;
+		msm_camera_io_w_mb(1,
+			vfe40_ctrl->share_ctrl->vfebase + VFE_REG_UPDATE_CMD);
+		CDBG("start video triggered.\n");
+	} else if (vfe40_ctrl->recording_state ==
+			VFE_STATE_STOP_REQUESTED) {
+		if (vfe40_ctrl->share_ctrl->operation_mode ==
+				VFE_OUTPUTS_VIDEO_AND_PREVIEW) {
+			msm_camera_io_w(0, vfe40_ctrl->share_ctrl->vfebase +
+				vfe40_AXI_WM_CFG[vfe40_ctrl->
+				share_ctrl->outpath.out0.ch0]);
+			msm_camera_io_w(0, vfe40_ctrl->share_ctrl->vfebase +
+				vfe40_AXI_WM_CFG[vfe40_ctrl->
+				share_ctrl->outpath.out0.ch1]);
+		} else if (vfe40_ctrl->share_ctrl->operation_mode ==
+				VFE_OUTPUTS_PREVIEW_AND_VIDEO) {
+			msm_camera_io_w(0, vfe40_ctrl->share_ctrl->vfebase +
+				vfe40_AXI_WM_CFG[vfe40_ctrl->
+				share_ctrl->outpath.out1.ch0]);
+			msm_camera_io_w(0, vfe40_ctrl->share_ctrl->vfebase +
+				vfe40_AXI_WM_CFG[vfe40_ctrl->
+				share_ctrl->outpath.out1.ch1]);
+		}
+		CDBG("stop video triggered.\n");
+	}
+
+	if (vfe40_ctrl->start_ack_pending == TRUE) {
+		vfe40_send_isp_msg(&vfe40_ctrl->subdev,
+			vfe40_ctrl->share_ctrl->vfeFrameId, MSG_ID_START_ACK);
+		vfe40_ctrl->start_ack_pending = FALSE;
+	} else {
+		if (vfe40_ctrl->recording_state ==
+				VFE_STATE_STOP_REQUESTED) {
+			vfe40_ctrl->recording_state = VFE_STATE_STOPPED;
+			/* request a reg update and send STOP_REC_ACK
+			 * when we process the next reg update irq.
+			 */
+			msm_camera_io_w_mb(1,
+			vfe40_ctrl->share_ctrl->vfebase + VFE_REG_UPDATE_CMD);
+		} else if (vfe40_ctrl->recording_state ==
+					VFE_STATE_STOPPED) {
+			vfe40_send_isp_msg(&vfe40_ctrl->subdev,
+				vfe40_ctrl->share_ctrl->vfeFrameId,
+				MSG_ID_STOP_REC_ACK);
+			vfe40_ctrl->recording_state = VFE_STATE_IDLE;
+		}
+		spin_lock_irqsave(&vfe40_ctrl->update_ack_lock, flags);
+		if (vfe40_ctrl->update_ack_pending == TRUE) {
+			vfe40_ctrl->update_ack_pending = FALSE;
+			spin_unlock_irqrestore(
+				&vfe40_ctrl->update_ack_lock, flags);
+			vfe40_send_isp_msg(&vfe40_ctrl->subdev,
+				vfe40_ctrl->share_ctrl->vfeFrameId,
+				MSG_ID_UPDATE_ACK);
+		} else {
+			spin_unlock_irqrestore(
+				&vfe40_ctrl->update_ack_lock, flags);
+		}
+	}
+
+	if (vfe40_ctrl->share_ctrl->liveshot_state ==
+		VFE_STATE_START_REQUESTED) {
+		CDBG("%s enabling liveshot output\n", __func__);
+		if (vfe40_ctrl->share_ctrl->outpath.output_mode &
+				VFE40_OUTPUT_MODE_PRIMARY) {
+			msm_camera_io_w(1, vfe40_ctrl->share_ctrl->vfebase +
+				vfe40_AXI_WM_CFG[vfe40_ctrl->
+				share_ctrl->outpath.out0.ch0]);
+			msm_camera_io_w(1, vfe40_ctrl->share_ctrl->vfebase +
+			vfe40_AXI_WM_CFG[vfe40_ctrl->
+				share_ctrl->outpath.out0.ch1]);
+			vfe40_ctrl->share_ctrl->liveshot_state =
+				VFE_STATE_STARTED;
+		}
+	}
+
+	if (vfe40_ctrl->share_ctrl->liveshot_state == VFE_STATE_STARTED) {
+		vfe40_ctrl->share_ctrl->vfe_capture_count--;
+		if (!vfe40_ctrl->share_ctrl->vfe_capture_count)
+			vfe40_ctrl->share_ctrl->liveshot_state =
+				VFE_STATE_STOP_REQUESTED;
+		msm_camera_io_w_mb(1, vfe40_ctrl->
+			share_ctrl->vfebase + VFE_REG_UPDATE_CMD);
+	} else if (vfe40_ctrl->share_ctrl->liveshot_state ==
+			VFE_STATE_STOP_REQUESTED) {
+		CDBG("%s: disabling liveshot output\n", __func__);
+		if (vfe40_ctrl->share_ctrl->outpath.output_mode &
+			VFE40_OUTPUT_MODE_PRIMARY) {
+			msm_camera_io_w(0, vfe40_ctrl->share_ctrl->vfebase +
+				vfe40_AXI_WM_CFG[vfe40_ctrl->
+				share_ctrl->outpath.out0.ch0]);
+			msm_camera_io_w(0, vfe40_ctrl->share_ctrl->vfebase +
+				vfe40_AXI_WM_CFG[vfe40_ctrl->
+				share_ctrl->outpath.out0.ch1]);
+			vfe40_ctrl->share_ctrl->liveshot_state =
+				VFE_STATE_STOPPED;
+			msm_camera_io_w_mb(1, vfe40_ctrl->share_ctrl->vfebase +
+				VFE_REG_UPDATE_CMD);
+		}
+	} else if (vfe40_ctrl->share_ctrl->liveshot_state ==
+			VFE_STATE_STOPPED) {
+		vfe40_ctrl->share_ctrl->liveshot_state = VFE_STATE_IDLE;
+	}
+
+	if ((vfe40_ctrl->share_ctrl->operation_mode ==
+			VFE_OUTPUTS_THUMB_AND_MAIN) ||
+		(vfe40_ctrl->share_ctrl->operation_mode ==
+			VFE_OUTPUTS_MAIN_AND_THUMB) ||
+		(vfe40_ctrl->share_ctrl->operation_mode ==
+			VFE_OUTPUTS_THUMB_AND_JPEG) ||
+		(vfe40_ctrl->share_ctrl->operation_mode ==
+			VFE_OUTPUTS_JPEG_AND_THUMB)) {
+		/* in snapshot mode */
+		/* later we need to add check for live snapshot mode. */
+		if (vfe40_ctrl->frame_skip_pattern & (0x1 <<
+			(vfe40_ctrl->snapshot_frame_cnt %
+				vfe40_ctrl->frame_skip_cnt))) {
+			vfe40_ctrl->share_ctrl->vfe_capture_count--;
+			/* if last frame to be captured: */
+			if (vfe40_ctrl->share_ctrl->vfe_capture_count == 0) {
+				/* stop the bus output:write master enable = 0*/
+				if (vfe40_ctrl->share_ctrl->outpath.output_mode
+					& VFE40_OUTPUT_MODE_PRIMARY) {
+					msm_camera_io_w(0,
+						vfe40_ctrl->share_ctrl->vfebase+
+						vfe40_AXI_WM_CFG[vfe40_ctrl->
+						share_ctrl->outpath.out0.ch0]);
+					msm_camera_io_w(0,
+						vfe40_ctrl->share_ctrl->vfebase+
+						vfe40_AXI_WM_CFG[vfe40_ctrl->
+						share_ctrl->outpath.out0.ch1]);
+				}
+				if (vfe40_ctrl->share_ctrl->outpath.output_mode&
+						VFE40_OUTPUT_MODE_SECONDARY) {
+					msm_camera_io_w(0,
+						vfe40_ctrl->share_ctrl->vfebase+
+						vfe40_AXI_WM_CFG[vfe40_ctrl->
+						share_ctrl->outpath.out1.ch0]);
+					msm_camera_io_w(0,
+						vfe40_ctrl->share_ctrl->vfebase+
+						vfe40_AXI_WM_CFG[vfe40_ctrl->
+						share_ctrl->outpath.out1.ch1]);
+				}
+				msm_camera_io_w_mb
+				(CAMIF_COMMAND_STOP_AT_FRAME_BOUNDARY,
+				vfe40_ctrl->share_ctrl->vfebase +
+				VFE_CAMIF_COMMAND);
+				vfe40_ctrl->snapshot_frame_cnt = -1;
+				vfe40_ctrl->frame_skip_cnt = 31;
+				vfe40_ctrl->frame_skip_pattern = 0xffffffff;
+			} /*if snapshot count is 0*/
+		} /*if frame is not being dropped*/
+		vfe40_ctrl->snapshot_frame_cnt++;
+		/* then do reg_update. */
+		msm_camera_io_w(1,
+			vfe40_ctrl->share_ctrl->vfebase + VFE_REG_UPDATE_CMD);
+	} /* if snapshot mode. */
+}
+
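+/* Program default register values after reset: demux gains, CGC
+ * override, output clamps and the stats bus UB configuration. */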
+static void vfe40_set_default_reg_values(
+			struct vfe40_ctrl_type *vfe40_ctrl)
+{
+	msm_camera_io_w(0x800080,
+		vfe40_ctrl->share_ctrl->vfebase + VFE_DEMUX_GAIN_0);
+	msm_camera_io_w(0x800080,
+		vfe40_ctrl->share_ctrl->vfebase + VFE_DEMUX_GAIN_1);
+	/* What value should we program CGC_OVERRIDE to? */
+	msm_camera_io_w(0xFFFFF,
+		vfe40_ctrl->share_ctrl->vfebase + VFE_CGC_OVERRIDE);
+
+	/* default output clamp values */
+	msm_camera_io_w(0, vfe40_ctrl->share_ctrl->vfebase + VFE_CLAMP_ENC_MIN);
+	msm_camera_io_w(0xFFFFFF,
+		vfe40_ctrl->share_ctrl->vfebase + VFE_CLAMP_ENC_MAX);
+	msm_camera_io_w(0,
+		vfe40_ctrl->share_ctrl->vfebase + VFE_CLAMP_VIEW_MIN);
+	msm_camera_io_w(0xFFFFFF,
+		vfe40_ctrl->share_ctrl->vfebase + VFE_CLAMP_VIEW_MAX);
+
+	/* stats UB config */
+	msm_camera_io_w(0x3980007,
+		vfe40_ctrl->share_ctrl->vfebase + VFE_BUS_STATS_AEC_UB_CFG);
+	msm_camera_io_w(0x3A00007,
+		vfe40_ctrl->share_ctrl->vfebase + VFE_BUS_STATS_AF_UB_CFG);
+	msm_camera_io_w(0x3A8000F,
+		vfe40_ctrl->share_ctrl->vfebase + VFE_BUS_STATS_AWB_UB_CFG);
+	msm_camera_io_w(0x3B80007,
+		vfe40_ctrl->share_ctrl->vfebase + VFE_BUS_STATS_RS_UB_CFG);
+	msm_camera_io_w(0x3C0001F,
+		vfe40_ctrl->share_ctrl->vfebase + VFE_BUS_STATS_CS_UB_CFG);
+	msm_camera_io_w(0x3E0001F,
+		vfe40_ctrl->share_ctrl->vfebase + VFE_BUS_STATS_HIST_UB_CFG);
+}
+
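+/*
+ * Reset-complete IRQ: if a stop was pending, acknowledge it; otherwise
+ * the reset came from a reset command, so restore default register
+ * values, reload all write masters and send MSG_ID_RESET_ACK.
+ */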
+static void vfe40_process_reset_irq(
+		struct vfe40_ctrl_type *vfe40_ctrl)
+{
+	unsigned long flags;
+
+	atomic_set(&vfe40_ctrl->share_ctrl->vstate, 0);
+
+	spin_lock_irqsave(&vfe40_ctrl->share_ctrl->stop_flag_lock, flags);
+	if (vfe40_ctrl->share_ctrl->stop_ack_pending) {
+		vfe40_ctrl->share_ctrl->stop_ack_pending = FALSE;
+		spin_unlock_irqrestore(
+			&vfe40_ctrl->share_ctrl->stop_flag_lock, flags);
+		vfe40_send_isp_msg(&vfe40_ctrl->subdev,
+			vfe40_ctrl->share_ctrl->vfeFrameId, MSG_ID_STOP_ACK);
+	} else {
+		spin_unlock_irqrestore(
+			&vfe40_ctrl->share_ctrl->stop_flag_lock, flags);
+		/* this is from reset command. */
+		vfe40_set_default_reg_values(vfe40_ctrl);
+
+		/* reload all write masters. (frame & line)*/
+		msm_camera_io_w(0x7FFF,
+			vfe40_ctrl->share_ctrl->vfebase + VFE_BUS_CMD);
+		vfe40_send_isp_msg(&vfe40_ctrl->subdev,
+			vfe40_ctrl->share_ctrl->vfeFrameId, MSG_ID_RESET_ACK);
+	}
+}
+
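+/*
+ * CAMIF start-of-frame IRQ: in RAW capture mode count down the remaining
+ * frames and stop CAMIF at the frame boundary when done.  In HFR mode
+ * skipped frames only bump the frame id; otherwise send MSG_ID_SOF_ACK.
+ */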
+static void vfe40_process_camif_sof_irq(
+		struct vfe40_ctrl_type *vfe40_ctrl)
+{
+	if (vfe40_ctrl->share_ctrl->operation_mode ==
+		VFE_OUTPUTS_RAW) {
+		if (vfe40_ctrl->start_ack_pending) {
+			vfe40_send_isp_msg(&vfe40_ctrl->subdev,
+				vfe40_ctrl->share_ctrl->vfeFrameId,
+				MSG_ID_START_ACK);
+			vfe40_ctrl->start_ack_pending = FALSE;
+		}
+		vfe40_ctrl->share_ctrl->vfe_capture_count--;
+		/* if last frame to be captured: */
+		if (vfe40_ctrl->share_ctrl->vfe_capture_count == 0) {
+			/* Ensure the write order while writing
+			 to the command register using the barrier */
+			msm_camera_io_w_mb(CAMIF_COMMAND_STOP_AT_FRAME_BOUNDARY,
+			vfe40_ctrl->share_ctrl->vfebase + VFE_CAMIF_COMMAND);
+		}
+	} /* if raw snapshot mode. */
+	if ((vfe40_ctrl->hfr_mode != HFR_MODE_OFF) &&
+		(vfe40_ctrl->share_ctrl->operation_mode ==
+			VFE_MODE_OF_OPERATION_VIDEO) &&
+		(vfe40_ctrl->share_ctrl->vfeFrameId %
+			vfe40_ctrl->hfr_mode != 0)) {
+		vfe40_ctrl->share_ctrl->vfeFrameId++;
+		CDBG("Skip the SOF notification when HFR enabled\n");
+		return;
+	}
+	vfe40_ctrl->share_ctrl->vfeFrameId++;
+	vfe40_send_isp_msg(&vfe40_ctrl->subdev,
+		vfe40_ctrl->share_ctrl->vfeFrameId, MSG_ID_SOF_ACK);
+	CDBG("camif_sof_irq, frameId = %d\n",
+		vfe40_ctrl->share_ctrl->vfeFrameId);
+
+	if (vfe40_ctrl->sync_timer_state) {
+		if (vfe40_ctrl->sync_timer_repeat_count == 0)
+			vfe40_sync_timer_stop(vfe40_ctrl);
+		else
+			vfe40_ctrl->sync_timer_repeat_count--;
+	}
+}
+
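+/* Decode the error IRQ bits: log each overflow/violation condition and
+ * forward CAMIF errors as MSG_ID_CAMIF_ERROR. */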
+static void vfe40_process_error_irq(
+	struct axi_ctrl_t *axi_ctrl, uint32_t errStatus)
+{
+	uint32_t reg_value;
+
+	if (errStatus & VFE40_IMASK_CAMIF_ERROR) {
+		pr_err("vfe40_irq: camif errors\n");
+		reg_value = msm_camera_io_r(
+			axi_ctrl->share_ctrl->vfebase + VFE_CAMIF_STATUS);
+		pr_err("camifStatus  = 0x%x\n", reg_value);
+		vfe40_send_isp_msg(&axi_ctrl->subdev,
+			axi_ctrl->share_ctrl->vfeFrameId, MSG_ID_CAMIF_ERROR);
+	}
+
+	if (errStatus & VFE40_IMASK_BHIST_OVWR)
+		pr_err("vfe40_irq: stats bhist overwrite\n");
+
+	if (errStatus & VFE40_IMASK_STATS_CS_OVWR)
+		pr_err("vfe40_irq: stats cs overwrite\n");
+
+	if (errStatus & VFE40_IMASK_STATS_IHIST_OVWR)
+		pr_err("vfe40_irq: stats ihist overwrite\n");
+
+	if (errStatus & VFE40_IMASK_REALIGN_BUF_Y_OVFL)
+		pr_err("vfe40_irq: realign buf Y overflow\n");
+
+	if (errStatus & VFE40_IMASK_REALIGN_BUF_CB_OVFL)
+		pr_err("vfe40_irq: realign buf CB overflow\n");
+
+	if (errStatus & VFE40_IMASK_REALIGN_BUF_CR_OVFL)
+		pr_err("vfe40_irq: realign buf CR overflow\n");
+
+	if (errStatus & VFE40_IMASK_VIOLATION) {
+		pr_err("vfe40_irq: violation interrupt\n");
+		reg_value = msm_camera_io_r(
+			axi_ctrl->share_ctrl->vfebase + VFE_VIOLATION_STATUS);
+		pr_err("%s: violationStatus  = 0x%x\n", __func__, reg_value);
+	}
+
+	if (errStatus & VFE40_IMASK_IMG_MAST_0_BUS_OVFL)
+		pr_err("vfe40_irq: image master 0 bus overflow\n");
+
+	if (errStatus & VFE40_IMASK_IMG_MAST_1_BUS_OVFL)
+		pr_err("vfe40_irq: image master 1 bus overflow\n");
+
+	if (errStatus & VFE40_IMASK_IMG_MAST_2_BUS_OVFL)
+		pr_err("vfe40_irq: image master 2 bus overflow\n");
+
+	if (errStatus & VFE40_IMASK_IMG_MAST_3_BUS_OVFL)
+		pr_err("vfe40_irq: image master 3 bus overflow\n");
+
+	if (errStatus & VFE40_IMASK_IMG_MAST_4_BUS_OVFL)
+		pr_err("vfe40_irq: image master 4 bus overflow\n");
+
+	if (errStatus & VFE40_IMASK_IMG_MAST_5_BUS_OVFL)
+		pr_err("vfe40_irq: image master 5 bus overflow\n");
+
+	if (errStatus & VFE40_IMASK_IMG_MAST_6_BUS_OVFL)
+		pr_err("vfe40_irq: image master 6 bus overflow\n");
+
+	if (errStatus & VFE40_IMASK_STATS_AE_BG_BUS_OVFL)
+		pr_err("vfe40_irq: ae/bg stats bus overflow\n");
+
+	if (errStatus & VFE40_IMASK_STATS_AF_BF_BUS_OVFL)
+		pr_err("vfe40_irq: af/bf stats bus overflow\n");
+
+	if (errStatus & VFE40_IMASK_STATS_AWB_BUS_OVFL)
+		pr_err("vfe40_irq: awb stats bus overflow\n");
+
+	if (errStatus & VFE40_IMASK_STATS_RS_BUS_OVFL)
+		pr_err("vfe40_irq: rs stats bus overflow\n");
+
+	if (errStatus & VFE40_IMASK_STATS_CS_BUS_OVFL)
+		pr_err("vfe40_irq: cs stats bus overflow\n");
+
+	if (errStatus & VFE40_IMASK_STATS_IHIST_BUS_OVFL)
+		pr_err("vfe40_irq: ihist stats bus overflow\n");
+
+	if (errStatus & VFE40_IMASK_STATS_SKIN_BHIST_BUS_OVFL)
+		pr_err("vfe40_irq: skin/bhist stats bus overflow\n");
+
+	if (errStatus & VFE40_IMASK_AXI_ERROR)
+		pr_err("vfe40_irq: axi error\n");
+}
+
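+/*
+ * Swap the stats ping/pong buffer for the given stats stream: determine
+ * from VFE_BUS_PING_PONG_STATUS which half the hardware is writing, put
+ * the new buffer address into the idle slot and return the address of
+ * the buffer that just completed.
+ */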
+static uint32_t  vfe40_process_stats_irq_common(
+	struct vfe40_ctrl_type *vfe40_ctrl,
+	uint32_t statsNum, uint32_t newAddr)
+{
+	uint32_t pingpongStatus;
+	uint32_t returnAddr;
+	uint32_t pingpongAddr;
+
+	/* must be 0=ping, 1=pong */
+	pingpongStatus =
+		((msm_camera_io_r(vfe40_ctrl->share_ctrl->vfebase +
+		VFE_BUS_PING_PONG_STATUS))
+	& ((uint32_t)(1<<(statsNum + 7)))) >> (statsNum + 7);
+	/* stats bits starts at 7 */
+	CDBG("statsNum %d, pingpongStatus %d\n", statsNum, pingpongStatus);
+	pingpongAddr =
+		((uint32_t)(vfe40_ctrl->share_ctrl->vfebase +
+				VFE_BUS_STATS_PING_PONG_BASE)) +
+				(3*statsNum)*4 + (1-pingpongStatus)*4;
+	returnAddr = msm_camera_io_r((uint32_t *)pingpongAddr);
+	msm_camera_io_w(newAddr, (uint32_t *)pingpongAddr);
+	return returnAddr;
+}
+
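+/*
+ * Map a completed stats buffer address back to its buffer index and
+ * kernel vaddr through the stats ops dispatcher, then notify the v4l2
+ * layer with the matching MSG_ID_STATS_* event.
+ */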
+static void
+vfe_send_stats_msg(struct vfe40_ctrl_type *vfe40_ctrl,
+	uint32_t bufAddress, uint32_t statsNum)
+{
+	int rc = 0;
+	void *vaddr = NULL;
+	/* fill message with right content. */
+	/* @todo This is causing issues, needs further investigation */
+	/* spin_lock_irqsave(&ctrl->state_lock, flags); */
+	struct isp_msg_stats msgStats;
+	msgStats.frameCounter = vfe40_ctrl->share_ctrl->vfeFrameId;
+	msgStats.buffer = bufAddress;
+
+	switch (statsNum) {
+	case statsAeNum:{
+		msgStats.id = MSG_ID_STATS_AEC;
+		rc = vfe40_ctrl->stats_ops.dispatch(
+				vfe40_ctrl->stats_ops.stats_ctrl,
+				MSM_STATS_TYPE_AEC, bufAddress,
+				&msgStats.buf_idx, &vaddr, &msgStats.fd,
+				vfe40_ctrl->stats_ops.client);
+		}
+		break;
+	case statsAfNum:{
+		msgStats.id = MSG_ID_STATS_AF;
+		rc = vfe40_ctrl->stats_ops.dispatch(
+				vfe40_ctrl->stats_ops.stats_ctrl,
+				MSM_STATS_TYPE_AF, bufAddress,
+				&msgStats.buf_idx, &vaddr, &msgStats.fd,
+				vfe40_ctrl->stats_ops.client);
+		}
+		break;
+	case statsAwbNum: {
+		msgStats.id = MSG_ID_STATS_AWB;
+		rc = vfe40_ctrl->stats_ops.dispatch(
+				vfe40_ctrl->stats_ops.stats_ctrl,
+				MSM_STATS_TYPE_AWB, bufAddress,
+				&msgStats.buf_idx, &vaddr, &msgStats.fd,
+				vfe40_ctrl->stats_ops.client);
+		}
+		break;
+
+	case statsIhistNum: {
+		msgStats.id = MSG_ID_STATS_IHIST;
+		rc = vfe40_ctrl->stats_ops.dispatch(
+				vfe40_ctrl->stats_ops.stats_ctrl,
+				MSM_STATS_TYPE_IHIST, bufAddress,
+				&msgStats.buf_idx, &vaddr, &msgStats.fd,
+				vfe40_ctrl->stats_ops.client);
+		}
+		break;
+	case statsRsNum: {
+		msgStats.id = MSG_ID_STATS_RS;
+		rc = vfe40_ctrl->stats_ops.dispatch(
+				vfe40_ctrl->stats_ops.stats_ctrl,
+				MSM_STATS_TYPE_RS, bufAddress,
+				&msgStats.buf_idx, &vaddr, &msgStats.fd,
+				vfe40_ctrl->stats_ops.client);
+		}
+		break;
+	case statsCsNum: {
+		msgStats.id = MSG_ID_STATS_CS;
+		rc = vfe40_ctrl->stats_ops.dispatch(
+				vfe40_ctrl->stats_ops.stats_ctrl,
+				MSM_STATS_TYPE_CS, bufAddress,
+				&msgStats.buf_idx, &vaddr, &msgStats.fd,
+				vfe40_ctrl->stats_ops.client);
+		}
+		break;
+
+	default:
+		goto stats_done;
+	}
+	if (rc == 0) {
+		msgStats.buffer = (uint32_t)vaddr;
+		v4l2_subdev_notify(&vfe40_ctrl->subdev,
+			NOTIFY_VFE_MSG_STATS,
+			&msgStats);
+	} else {
+		pr_err("%s: paddr to idx mapping error, stats_id = %d, paddr = 0x%x\n",
+			 __func__, msgStats.id, msgStats.buffer);
+	}
+stats_done:
+	/* spin_unlock_irqrestore(&ctrl->state_lock, flags); */
+	return;
+}
+
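+/* Bundle the completed stats buffers of this frame, plus the AWB ymin
+ * value read back from hardware, into one composite stats message. */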
+static void vfe_send_comp_stats_msg(
+	struct vfe40_ctrl_type *vfe40_ctrl, uint32_t status_bits)
+{
+	struct msm_stats_buf msgStats;
+	uint32_t temp;
+
+	msgStats.frame_id = vfe40_ctrl->share_ctrl->vfeFrameId;
+	msgStats.status_bits = status_bits;
+
+	msgStats.aec.buff = vfe40_ctrl->aecStatsControl.bufToRender;
+	msgStats.awb.buff = vfe40_ctrl->awbStatsControl.bufToRender;
+	msgStats.af.buff = vfe40_ctrl->afStatsControl.bufToRender;
+
+	msgStats.ihist.buff = vfe40_ctrl->ihistStatsControl.bufToRender;
+	msgStats.rs.buff = vfe40_ctrl->rsStatsControl.bufToRender;
+	msgStats.cs.buff = vfe40_ctrl->csStatsControl.bufToRender;
+
+	temp = msm_camera_io_r(
+		vfe40_ctrl->share_ctrl->vfebase + VFE_STATS_AWB_SGW_CFG);
+	msgStats.awb_ymin = (0xFF00 & temp) >> 8;
+
+	v4l2_subdev_notify(&vfe40_ctrl->subdev,
+				NOTIFY_VFE_MSG_COMP_STATS,
+				&msgStats);
+}
+
+static void vfe40_process_stats_awb_irq(struct vfe40_ctrl_type *vfe40_ctrl)
+{
+	unsigned long flags;
+	uint32_t addr;
+	spin_lock_irqsave(&vfe40_ctrl->stats_bufq_lock, flags);
+	addr = (uint32_t)vfe40_stats_dqbuf(vfe40_ctrl, MSM_STATS_TYPE_AWB);
+	spin_unlock_irqrestore(&vfe40_ctrl->stats_bufq_lock, flags);
+	if (addr) {
+		vfe40_ctrl->awbStatsControl.bufToRender =
+			vfe40_process_stats_irq_common(vfe40_ctrl, statsAwbNum,
+			addr);
+
+		vfe_send_stats_msg(vfe40_ctrl,
+			vfe40_ctrl->awbStatsControl.bufToRender, statsAwbNum);
+	} else{
+		vfe40_ctrl->awbStatsControl.droppedStatsFrameCount++;
+		CDBG("%s: droppedStatsFrameCount = %d", __func__,
+			vfe40_ctrl->awbStatsControl.droppedStatsFrameCount);
+	}
+}
+
+static void vfe40_process_stats_ihist_irq(struct vfe40_ctrl_type *vfe40_ctrl)
+{
+	unsigned long flags;
+	uint32_t addr;
+	spin_lock_irqsave(&vfe40_ctrl->stats_bufq_lock, flags);
+	addr = (uint32_t)vfe40_stats_dqbuf(vfe40_ctrl, MSM_STATS_TYPE_IHIST);
+	spin_unlock_irqrestore(&vfe40_ctrl->stats_bufq_lock, flags);
+	if (addr) {
+		vfe40_ctrl->ihistStatsControl.bufToRender =
+			vfe40_process_stats_irq_common(
+			vfe40_ctrl, statsIhistNum, addr);
+
+		vfe_send_stats_msg(vfe40_ctrl,
+			vfe40_ctrl->ihistStatsControl.bufToRender,
+			statsIhistNum);
+	} else {
+		vfe40_ctrl->ihistStatsControl.droppedStatsFrameCount++;
+		CDBG("%s: droppedStatsFrameCount = %d", __func__,
+			vfe40_ctrl->ihistStatsControl.droppedStatsFrameCount);
+	}
+}
+
+static void vfe40_process_stats_rs_irq(struct vfe40_ctrl_type *vfe40_ctrl)
+{
+	unsigned long flags;
+	uint32_t addr;
+	spin_lock_irqsave(&vfe40_ctrl->stats_bufq_lock, flags);
+	addr = (uint32_t)vfe40_stats_dqbuf(vfe40_ctrl, MSM_STATS_TYPE_RS);
+	spin_unlock_irqrestore(&vfe40_ctrl->stats_bufq_lock, flags);
+	if (addr) {
+		vfe40_ctrl->rsStatsControl.bufToRender =
+			vfe40_process_stats_irq_common(vfe40_ctrl, statsRsNum,
+			addr);
+
+		vfe_send_stats_msg(vfe40_ctrl,
+			vfe40_ctrl->rsStatsControl.bufToRender, statsRsNum);
+	} else {
+		vfe40_ctrl->rsStatsControl.droppedStatsFrameCount++;
+		CDBG("%s: droppedStatsFrameCount = %d", __func__,
+			vfe40_ctrl->rsStatsControl.droppedStatsFrameCount);
+	}
+}
+
+static void vfe40_process_stats_cs_irq(struct vfe40_ctrl_type *vfe40_ctrl)
+{
+	unsigned long flags;
+	uint32_t addr;
+	spin_lock_irqsave(&vfe40_ctrl->stats_bufq_lock, flags);
+	addr = (uint32_t)vfe40_stats_dqbuf(vfe40_ctrl, MSM_STATS_TYPE_CS);
+	spin_unlock_irqrestore(&vfe40_ctrl->stats_bufq_lock, flags);
+	if (addr) {
+		vfe40_ctrl->csStatsControl.bufToRender =
+			vfe40_process_stats_irq_common(vfe40_ctrl, statsCsNum,
+			addr);
+
+		vfe_send_stats_msg(vfe40_ctrl,
+			vfe40_ctrl->csStatsControl.bufToRender, statsCsNum);
+	} else {
+		vfe40_ctrl->csStatsControl.droppedStatsFrameCount++;
+		CDBG("%s: droppedStatsFrameCount = %d", __func__,
+			vfe40_ctrl->csStatsControl.droppedStatsFrameCount);
+	}
+}
+
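+/*
+ * Composite stats handler: for each stats type flagged in status_bits,
+ * dequeue a buffer, swap the ping/pong slot and record the buffer to
+ * render; send a single composite message if anything completed.
+ */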
+static void vfe40_process_stats(struct vfe40_ctrl_type *vfe40_ctrl,
+	uint32_t status_bits)
+{
+	unsigned long flags;
+	int32_t process_stats = false;
+	uint32_t addr;
+
+	CDBG("%s, stats = 0x%x\n", __func__, status_bits);
+	spin_lock_irqsave(&vfe40_ctrl->stats_bufq_lock, flags);
+	if (status_bits & VFE_IRQ_STATUS0_STATS_AWB) {
+		addr = (uint32_t)vfe40_stats_dqbuf(vfe40_ctrl,
+			MSM_STATS_TYPE_AWB);
+		if (addr) {
+			vfe40_ctrl->awbStatsControl.bufToRender =
+				vfe40_process_stats_irq_common(
+				vfe40_ctrl, statsAwbNum,
+				addr);
+			process_stats = true;
+		} else{
+			vfe40_ctrl->awbStatsControl.droppedStatsFrameCount++;
+			vfe40_ctrl->awbStatsControl.bufToRender = 0;
+		}
+	} else {
+		vfe40_ctrl->awbStatsControl.bufToRender = 0;
+	}
+
+	if (status_bits & VFE_IRQ_STATUS0_STATS_IHIST) {
+		addr = (uint32_t)vfe40_stats_dqbuf(vfe40_ctrl,
+					MSM_STATS_TYPE_IHIST);
+		if (addr) {
+			vfe40_ctrl->ihistStatsControl.bufToRender =
+				vfe40_process_stats_irq_common(
+				vfe40_ctrl, statsIhistNum,
+				addr);
+			process_stats = true;
+		} else {
+			vfe40_ctrl->ihistStatsControl.droppedStatsFrameCount++;
+			vfe40_ctrl->ihistStatsControl.bufToRender = 0;
+		}
+	} else {
+		vfe40_ctrl->ihistStatsControl.bufToRender = 0;
+	}
+
+	if (status_bits & VFE_IRQ_STATUS0_STATS_RS) {
+		addr = (uint32_t)vfe40_stats_dqbuf(vfe40_ctrl,
+					MSM_STATS_TYPE_RS);
+		if (addr) {
+			vfe40_ctrl->rsStatsControl.bufToRender =
+				vfe40_process_stats_irq_common(
+				vfe40_ctrl, statsRsNum,
+				addr);
+			process_stats = true;
+		} else {
+			vfe40_ctrl->rsStatsControl.droppedStatsFrameCount++;
+			vfe40_ctrl->rsStatsControl.bufToRender = 0;
+		}
+	} else {
+		vfe40_ctrl->rsStatsControl.bufToRender = 0;
+	}
+
+	if (status_bits & VFE_IRQ_STATUS0_STATS_CS) {
+		addr = (uint32_t)vfe40_stats_dqbuf(vfe40_ctrl,
+					MSM_STATS_TYPE_CS);
+		if (addr) {
+			vfe40_ctrl->csStatsControl.bufToRender =
+				vfe40_process_stats_irq_common(
+				vfe40_ctrl, statsCsNum,
+				addr);
+			process_stats = true;
+		} else {
+			vfe40_ctrl->csStatsControl.droppedStatsFrameCount++;
+			vfe40_ctrl->csStatsControl.bufToRender = 0;
+		}
+	} else {
+		vfe40_ctrl->csStatsControl.bufToRender = 0;
+	}
+	spin_unlock_irqrestore(&vfe40_ctrl->stats_bufq_lock, flags);
+	if (process_stats)
+		vfe_send_comp_stats_msg(vfe40_ctrl, status_bits);
+
+	return;
+}
+
+static void vfe40_process_stats_irq(
+	struct vfe40_ctrl_type *vfe40_ctrl, uint32_t irqstatus)
+{
+	uint32_t status_bits = VFE_COM_STATUS & irqstatus;
+
+	if ((vfe40_ctrl->hfr_mode != HFR_MODE_OFF) &&
+		(vfe40_ctrl->share_ctrl->vfeFrameId %
+		 vfe40_ctrl->hfr_mode != 0)) {
+		CDBG("Skip the stats when HFR enabled\n");
+		return;
+	}
+
+	vfe40_process_stats(vfe40_ctrl, status_bits);
+	return;
+}
+
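+/* Route a single latched IRQ notification to its handler; composite
+ * stats interrupts are handled as a group. */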
+static void vfe40_process_irq(
+	struct vfe40_ctrl_type *vfe40_ctrl, uint32_t irqstatus)
+{
+	if (irqstatus &
+		VFE_IRQ_STATUS0_STATS_COMPOSIT_MASK_0) {
+		vfe40_process_stats_irq(vfe40_ctrl, irqstatus);
+		return;
+	}
+
+	switch (irqstatus) {
+	case VFE_IRQ_STATUS0_CAMIF_SOF_MASK:
+		CDBG("irq	camifSofIrq\n");
+		vfe40_process_camif_sof_irq(vfe40_ctrl);
+		break;
+	case VFE_IRQ_STATUS0_REG_UPDATE_MASK:
+		CDBG("irq	regUpdateIrq\n");
+		vfe40_process_reg_update_irq(vfe40_ctrl);
+		break;
+	case VFE_IMASK_WHILE_STOPPING_0:
+		CDBG("irq	resetAckIrq\n");
+		vfe40_process_reset_irq(vfe40_ctrl);
+		break;
+	case VFE_IRQ_STATUS0_STATS_AWB:
+		CDBG("Stats AWB irq occurred.\n");
+		vfe40_process_stats_awb_irq(vfe40_ctrl);
+		break;
+	case VFE_IRQ_STATUS0_STATS_IHIST:
+		CDBG("Stats IHIST irq occurred.\n");
+		vfe40_process_stats_ihist_irq(vfe40_ctrl);
+		break;
+	case VFE_IRQ_STATUS0_STATS_RS:
+		CDBG("Stats RS irq occurred.\n");
+		vfe40_process_stats_rs_irq(vfe40_ctrl);
+		break;
+	case VFE_IRQ_STATUS0_STATS_CS:
+		CDBG("Stats CS irq occurred.\n");
+		vfe40_process_stats_cs_irq(vfe40_ctrl);
+		break;
+	case VFE_IRQ_STATUS1_SYNC_TIMER0:
+		CDBG("SYNC_TIMER 0 irq occurred.\n");
+		vfe40_send_isp_msg(&vfe40_ctrl->subdev,
+			vfe40_ctrl->share_ctrl->vfeFrameId,
+			MSG_ID_SYNC_TIMER0_DONE);
+		break;
+	case VFE_IRQ_STATUS1_SYNC_TIMER1:
+		CDBG("SYNC_TIMER 1 irq occurred.\n");
+		vfe40_send_isp_msg(&vfe40_ctrl->subdev,
+			vfe40_ctrl->share_ctrl->vfeFrameId,
+			MSG_ID_SYNC_TIMER1_DONE);
+		break;
+	case VFE_IRQ_STATUS1_SYNC_TIMER2:
+		CDBG("SYNC_TIMER 2 irq occurred.\n");
+		vfe40_send_isp_msg(&vfe40_ctrl->subdev,
+			vfe40_ctrl->share_ctrl->vfeFrameId,
+			MSG_ID_SYNC_TIMER2_DONE);
+		break;
+	default:
+		pr_err("Invalid IRQ status\n");
+	}
+}
+
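+/*
+ * Tasklet bottom half: drain the ISR queue and forward each latched IRQ
+ * status to the subdev notifiers (SOF, reg update, reset/stop, errors,
+ * AXI write-master completions and stats interrupts).
+ */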
+static void axi40_do_tasklet(unsigned long data)
+{
+	unsigned long flags;
+	struct axi_ctrl_t *axi_ctrl = (struct axi_ctrl_t *)data;
+	struct vfe40_isr_queue_cmd *qcmd = NULL;
+
+	CDBG("=== axi40_do_tasklet start ===\n");
+
+	while (atomic_read(&axi_ctrl->share_ctrl->irq_cnt)) {
+		spin_lock_irqsave(&axi_ctrl->tasklet_lock, flags);
+		qcmd = list_first_entry(&axi_ctrl->tasklet_q,
+			struct vfe40_isr_queue_cmd, list);
+		atomic_sub(1, &axi_ctrl->share_ctrl->irq_cnt);
+
+		if (!qcmd) {
+			spin_unlock_irqrestore(&axi_ctrl->tasklet_lock,
+				flags);
+			return;
+		}
+
+		list_del(&qcmd->list);
+		spin_unlock_irqrestore(&axi_ctrl->tasklet_lock,
+			flags);
+
+		if (qcmd->vfeInterruptStatus0 &
+				VFE_IRQ_STATUS0_CAMIF_SOF_MASK)
+			v4l2_subdev_notify(&axi_ctrl->subdev,
+				NOTIFY_VFE_IRQ,
+				(void *)VFE_IRQ_STATUS0_CAMIF_SOF_MASK);
+
+		/* interrupt to be processed,  *qcmd has the payload.  */
+		if (qcmd->vfeInterruptStatus0 &
+				VFE_IRQ_STATUS0_REG_UPDATE_MASK) {
+			v4l2_subdev_notify(&axi_ctrl->subdev,
+				NOTIFY_VFE_IRQ,
+				(void *)VFE_IRQ_STATUS0_REG_UPDATE_MASK);
+		}
+
+		if (qcmd->vfeInterruptStatus0 &
+				VFE_IMASK_WHILE_STOPPING_0)
+			v4l2_subdev_notify(&axi_ctrl->subdev,
+				NOTIFY_VFE_IRQ,
+				(void *)VFE_IMASK_WHILE_STOPPING_0);
+
+		if (atomic_read(&axi_ctrl->share_ctrl->vstate)) {
+			if (qcmd->vfeInterruptStatus1 &
+					VFE40_IMASK_ERROR_ONLY_1) {
+				pr_err("irq	errorIrq\n");
+				vfe40_process_error_irq(
+					axi_ctrl,
+					qcmd->vfeInterruptStatus1 &
+					VFE40_IMASK_ERROR_ONLY_1);
+			}
+			v4l2_subdev_notify(&axi_ctrl->subdev,
+				NOTIFY_AXI_IRQ,
+				(void *)qcmd->vfeInterruptStatus0);
+
+			/* then process stats irq. */
+			if (axi_ctrl->share_ctrl->stats_comp) {
+				/* process stats comb interrupt. */
+				if (qcmd->vfeInterruptStatus0 &
+					VFE_IRQ_STATUS0_STATS_COMPOSIT_MASK_0) {
+					CDBG("Stats composite irq occurred.\n");
+					v4l2_subdev_notify(&axi_ctrl->subdev,
+					NOTIFY_VFE_IRQ,
+					(void *)qcmd->vfeInterruptStatus0);
+				}
+			} else {
+				/* process individual stats interrupt. */
+				if (qcmd->vfeInterruptStatus0 &
+						VFE_IRQ_STATUS0_STATS_AWB)
+					v4l2_subdev_notify(&axi_ctrl->subdev,
+					NOTIFY_VFE_IRQ,
+					(void *)VFE_IRQ_STATUS0_STATS_AWB);
+				if (qcmd->vfeInterruptStatus0 &
+						VFE_IRQ_STATUS0_STATS_IHIST)
+					v4l2_subdev_notify(&axi_ctrl->subdev,
+					NOTIFY_VFE_IRQ,
+					(void *)VFE_IRQ_STATUS0_STATS_IHIST);
+
+				if (qcmd->vfeInterruptStatus0 &
+						VFE_IRQ_STATUS0_STATS_RS)
+					v4l2_subdev_notify(&axi_ctrl->subdev,
+					NOTIFY_VFE_IRQ,
+					(void *)VFE_IRQ_STATUS0_STATS_RS);
+
+				if (qcmd->vfeInterruptStatus0 &
+						VFE_IRQ_STATUS0_STATS_CS)
+					v4l2_subdev_notify(&axi_ctrl->subdev,
+					NOTIFY_VFE_IRQ,
+					(void *)VFE_IRQ_STATUS0_STATS_CS);
+
+				if (qcmd->vfeInterruptStatus1 &
+						VFE_IRQ_STATUS1_SYNC_TIMER0)
+					v4l2_subdev_notify(&axi_ctrl->subdev,
+					NOTIFY_VFE_IRQ,
+					(void *)VFE_IRQ_STATUS1_SYNC_TIMER0);
+
+				if (qcmd->vfeInterruptStatus1 &
+						VFE_IRQ_STATUS1_SYNC_TIMER1)
+					v4l2_subdev_notify(&axi_ctrl->subdev,
+					NOTIFY_VFE_IRQ,
+					(void *)VFE_IRQ_STATUS1_SYNC_TIMER1);
+
+				if (qcmd->vfeInterruptStatus1 &
+						VFE_IRQ_STATUS1_SYNC_TIMER2)
+					v4l2_subdev_notify(&axi_ctrl->subdev,
+					NOTIFY_VFE_IRQ,
+					(void *)VFE_IRQ_STATUS1_SYNC_TIMER2);
+			}
+		}
+		kfree(qcmd);
+	}
+	CDBG("=== axi40_do_tasklet end ===\n");
+}
+
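+/*
+ * Top half of the VFE interrupt path (hard-irq context): read the IRQ
+ * status via vfe40_read_irq_status(), allocate a queue command with
+ * GFP_ATOMIC, append it to the tasklet queue and schedule
+ * axi40_do_tasklet() to do the heavier notification work.
+ */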
+static irqreturn_t vfe40_parse_irq(int irq_num, void *data)
+{
+	unsigned long flags;
+	struct vfe40_irq_status irq;
+	struct vfe40_isr_queue_cmd *qcmd;
+	struct axi_ctrl_t *axi_ctrl = data;
+
+	CDBG("vfe_parse_irq\n");
+
+	vfe40_read_irq_status(axi_ctrl, &irq);
+
+	if ((irq.vfeIrqStatus0 == 0) && (irq.vfeIrqStatus1 == 0)) {
+		CDBG("vfe_parse_irq: vfeIrqStatus0 & 1 are both 0!\n");
+		return IRQ_HANDLED;
+	}
+
+	qcmd = kzalloc(sizeof(struct vfe40_isr_queue_cmd),
+		GFP_ATOMIC);
+	if (!qcmd) {
+		pr_err("vfe_parse_irq: qcmd malloc failed!\n");
+		return IRQ_HANDLED;
+	}
+
+	spin_lock_irqsave(&axi_ctrl->share_ctrl->stop_flag_lock, flags);
+	if (axi_ctrl->share_ctrl->stop_ack_pending) {
+		irq.vfeIrqStatus0 &= VFE_IMASK_WHILE_STOPPING_0;
+		irq.vfeIrqStatus1 &= VFE_IMASK_WHILE_STOPPING_1;
+	}
+	spin_unlock_irqrestore(&axi_ctrl->share_ctrl->stop_flag_lock, flags);
+
+	CDBG("vfe_parse_irq: Irq_status0 = 0x%x, Irq_status1 = 0x%x.\n",
+		irq.vfeIrqStatus0, irq.vfeIrqStatus1);
+
+	qcmd->vfeInterruptStatus0 = irq.vfeIrqStatus0;
+	qcmd->vfeInterruptStatus1 = irq.vfeIrqStatus1;
+
+	spin_lock_irqsave(&axi_ctrl->tasklet_lock, flags);
+	list_add_tail(&qcmd->list, &axi_ctrl->tasklet_q);
+
+	atomic_add(1, &axi_ctrl->share_ctrl->irq_cnt);
+	spin_unlock_irqrestore(&axi_ctrl->tasklet_lock, flags);
+	tasklet_schedule(&axi_ctrl->vfe40_tasklet);
+	return IRQ_HANDLED;
+}
+
+
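+/*
+ * Stats buffer-queue sub-ioctl. The stats ops/ctrl are initialized
+ * lazily on the first VFE_CMD_STATS_REQBUF (using the ion client
+ * passed in); REQBUF/ENQUEUEBUF/FLUSH_BUFQ are then forwarded to the
+ * generic msm_stats_buf operations.
+ */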
+static long vfe_stats_bufq_sub_ioctl(
+	struct vfe40_ctrl_type *vfe_ctrl,
+	struct msm_vfe_cfg_cmd *cmd, void *ion_client)
+{
+	long rc = 0;
+	switch (cmd->cmd_type) {
+	case VFE_CMD_STATS_REQBUF:
+	if (!vfe_ctrl->stats_ops.stats_ctrl) {
+		/* stats_ctrl has not been init yet */
+		rc = msm_stats_buf_ops_init(&vfe_ctrl->stats_ctrl,
+				(struct ion_client *)ion_client,
+				&vfe_ctrl->stats_ops);
+		if (rc < 0) {
+			pr_err("%s: cannot init stats ops", __func__);
+			goto end;
+		}
+		rc = vfe_ctrl->stats_ops.stats_ctrl_init(&vfe_ctrl->stats_ctrl);
+		if (rc < 0) {
+			pr_err("%s: cannot init stats_ctrl ops", __func__);
+			memset(&vfe_ctrl->stats_ops, 0,
+				sizeof(vfe_ctrl->stats_ops));
+			goto end;
+		}
+		if (sizeof(struct msm_stats_reqbuf) != cmd->length) {
+			/* error. the length not match */
+			pr_err("%s: stats reqbuf input size = %d,\n"
+				"struct size = %d, mitch match\n",
+				 __func__, cmd->length,
+				sizeof(struct msm_stats_reqbuf));
+			rc = -EINVAL ;
+			goto end;
+		}
+	}
+	rc = vfe_ctrl->stats_ops.reqbuf(
+			&vfe_ctrl->stats_ctrl,
+			(struct msm_stats_reqbuf *)cmd->value,
+			vfe_ctrl->stats_ops.client);
+	break;
+	case VFE_CMD_STATS_ENQUEUEBUF:
+	if (sizeof(struct msm_stats_buf_info) != cmd->length) {
+		/* error. the length not match */
+		pr_err("%s: stats enqueuebuf input size = %d,\n"
+			"struct size = %d, mitch match\n",
+			 __func__, cmd->length,
+			sizeof(struct msm_stats_buf_info));
+			rc = -EINVAL;
+			goto end;
+	}
+	rc = vfe_ctrl->stats_ops.enqueue_buf(
+			&vfe_ctrl->stats_ctrl,
+			(struct msm_stats_buf_info *)cmd->value,
+			vfe_ctrl->stats_ops.client);
+	break;
+	case VFE_CMD_STATS_FLUSH_BUFQ:
+	{
+		struct msm_stats_flush_bufq *flush_req = NULL;
+		flush_req = (struct msm_stats_flush_bufq *)cmd->value;
+		if (sizeof(struct msm_stats_flush_bufq) != cmd->length) {
+			/* error. the length not match */
+			pr_err("%s: stats flush queue input size = %d,\n"
+				"struct size = %d, mitch match\n",
+				__func__, cmd->length,
+				sizeof(struct msm_stats_flush_bufq));
+			rc = -EINVAL;
+			goto end;
+	}
+	rc = vfe_ctrl->stats_ops.bufq_flush(
+			&vfe_ctrl->stats_ctrl,
+			(enum msm_stats_enum_type)flush_req->stats_type,
+			vfe_ctrl->stats_ops.client);
+	}
+	break;
+	default:
+		rc = -1;
+		pr_err("%s: cmd_type %d not supported", __func__,
+			cmd->cmd_type);
+	break;
+	}
+end:
+	return rc;
+}
+
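+/*
+ * Main VFE subdev ioctl: stats buffer-queue commands are routed to
+ * vfe_stats_bufq_sub_ioctl(), IRQ processing to vfe40_process_irq(),
+ * and the remaining commands are either ping/pong/free buffer address
+ * updates handled here or general config commands copied from user
+ * space and passed to vfe40_proc_general().
+ */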
+static long msm_vfe_subdev_ioctl(struct v4l2_subdev *sd,
+			unsigned int subdev_cmd, void *arg)
+{
+	struct msm_cam_media_controller *pmctl =
+		(struct msm_cam_media_controller *)v4l2_get_subdev_hostdata(sd);
+	struct vfe40_ctrl_type *vfe40_ctrl =
+		(struct vfe40_ctrl_type *)v4l2_get_subdevdata(sd);
+	struct msm_isp_cmd vfecmd;
+	struct msm_camvfe_params *vfe_params =
+		(struct msm_camvfe_params *)arg;
+	struct msm_vfe_cfg_cmd *cmd = vfe_params->vfe_cfg;
+	void *data = vfe_params->data;
+
+	long rc = 0;
+	struct vfe_cmd_stats_buf *scfg = NULL;
+	struct vfe_cmd_stats_ack *sack = NULL;
+
+	if (!vfe40_ctrl->share_ctrl->vfebase) {
+		pr_err("%s: base address unmapped\n", __func__);
+		return -EFAULT;
+	}
+
+	switch (cmd->cmd_type) {
+	case CMD_VFE_PROCESS_IRQ:
+		vfe40_process_irq(vfe40_ctrl, (uint32_t) data);
+		return rc;
+	case VFE_CMD_STATS_REQBUF:
+	case VFE_CMD_STATS_ENQUEUEBUF:
+	case VFE_CMD_STATS_FLUSH_BUFQ:
+		/* for easy porting, handle all stats buffer commands in one envelope */
+		rc = vfe_stats_bufq_sub_ioctl(vfe40_ctrl,
+				cmd, vfe_params->data);
+		return rc;
+	default:
+		if (cmd->cmd_type != CMD_CONFIG_PING_ADDR &&
+			cmd->cmd_type != CMD_CONFIG_PONG_ADDR &&
+			cmd->cmd_type != CMD_CONFIG_FREE_BUF_ADDR &&
+			cmd->cmd_type != CMD_STATS_AEC_BUF_RELEASE &&
+			cmd->cmd_type != CMD_STATS_AWB_BUF_RELEASE &&
+			cmd->cmd_type != CMD_STATS_IHIST_BUF_RELEASE &&
+			cmd->cmd_type != CMD_STATS_RS_BUF_RELEASE &&
+			cmd->cmd_type != CMD_STATS_CS_BUF_RELEASE &&
+			cmd->cmd_type != CMD_STATS_AF_BUF_RELEASE) {
+				if (copy_from_user(&vfecmd,
+					(void __user *)(cmd->value),
+					sizeof(vfecmd))) {
+						pr_err("%s %d: copy_from_user failed\n",
+							__func__, __LINE__);
+					return -EFAULT;
+				}
+		} else {
+			/* here it is either a stats release or a frame release. */
+			if (cmd->cmd_type != CMD_CONFIG_PING_ADDR &&
+				cmd->cmd_type != CMD_CONFIG_PONG_ADDR &&
+				cmd->cmd_type != CMD_CONFIG_FREE_BUF_ADDR) {
+				/* then must be stats release. */
+				if (!data) {
+					pr_err("%s: data = NULL, cmd->cmd_type = %d",
+						__func__, cmd->cmd_type);
+					return -EFAULT;
+				}
+				sack = kmalloc(sizeof(struct vfe_cmd_stats_ack),
+							GFP_ATOMIC);
+				if (!sack) {
+					pr_err("%s: no mem for cmd->cmd_type = %d",
+					 __func__, cmd->cmd_type);
+					return -ENOMEM;
+				}
+				sack->nextStatsBuf = *(uint32_t *)data;
+			}
+		}
+		CDBG("%s: cmdType = %d\n", __func__, cmd->cmd_type);
+
+		if ((cmd->cmd_type == CMD_STATS_AF_ENABLE)    ||
+			(cmd->cmd_type == CMD_STATS_AWB_ENABLE)   ||
+			(cmd->cmd_type == CMD_STATS_IHIST_ENABLE) ||
+			(cmd->cmd_type == CMD_STATS_RS_ENABLE)    ||
+			(cmd->cmd_type == CMD_STATS_CS_ENABLE)    ||
+			(cmd->cmd_type == CMD_STATS_AEC_ENABLE)) {
+				scfg = NULL;
+				/* individual */
+				goto vfe40_config_done;
+		}
+		switch (cmd->cmd_type) {
+		case CMD_GENERAL:
+			rc = vfe40_proc_general(pmctl, &vfecmd, vfe40_ctrl);
+		break;
+		case CMD_CONFIG_PING_ADDR: {
+			int path = *((int *)cmd->value);
+			struct vfe40_output_ch *outch =
+				vfe40_get_ch(path, vfe40_ctrl->share_ctrl);
+			outch->ping = *((struct msm_free_buf *)data);
+		}
+		break;
+
+		case CMD_CONFIG_PONG_ADDR: {
+			int path = *((int *)cmd->value);
+			struct vfe40_output_ch *outch =
+				vfe40_get_ch(path, vfe40_ctrl->share_ctrl);
+			outch->pong = *((struct msm_free_buf *)data);
+		}
+		break;
+
+		case CMD_CONFIG_FREE_BUF_ADDR: {
+			int path = *((int *)cmd->value);
+			struct vfe40_output_ch *outch =
+				vfe40_get_ch(path, vfe40_ctrl->share_ctrl);
+			outch->free_buf = *((struct msm_free_buf *)data);
+		}
+		break;
+		case CMD_SNAP_BUF_RELEASE:
+			break;
+		default:
+			pr_err("%s Unsupported AXI configuration %x ", __func__,
+				cmd->cmd_type);
+		break;
+		}
+	}
+vfe40_config_done:
+	kfree(scfg);
+	kfree(sack);
+	CDBG("%s done: rc = %d\n", __func__, (int) rc);
+	return rc;
+}
+
+static const struct v4l2_subdev_core_ops msm_vfe_subdev_core_ops = {
+	.ioctl = msm_vfe_subdev_ioctl,
+};
+
+static const struct v4l2_subdev_ops msm_vfe_subdev_ops = {
+	.core = &msm_vfe_subdev_core_ops,
+};
+
+int msm_vfe_subdev_init(struct v4l2_subdev *sd,
+			struct msm_cam_media_controller *mctl)
+{
+	int rc = 0;
+	struct vfe40_ctrl_type *vfe40_ctrl =
+		(struct vfe40_ctrl_type *)v4l2_get_subdevdata(sd);
+	v4l2_set_subdev_hostdata(sd, mctl);
+
+	spin_lock_init(&vfe40_ctrl->share_ctrl->stop_flag_lock);
+	spin_lock_init(&vfe40_ctrl->state_lock);
+	spin_lock_init(&vfe40_ctrl->io_lock);
+	spin_lock_init(&vfe40_ctrl->update_ack_lock);
+	spin_lock_init(&vfe40_ctrl->stats_bufq_lock);
+
+
+	vfe40_ctrl->update_linear = false;
+	vfe40_ctrl->update_rolloff = false;
+	vfe40_ctrl->update_la = false;
+	vfe40_ctrl->update_gamma = false;
+	vfe40_ctrl->hfr_mode = HFR_MODE_OFF;
+
+	return rc;
+}
+
+void msm_vfe_subdev_release(struct v4l2_subdev *sd)
+{
+	struct vfe40_ctrl_type *vfe40_ctrl =
+		(struct vfe40_ctrl_type *)v4l2_get_subdevdata(sd);
+	if (vfe40_ctrl->share_ctrl->vfebase)
+		vfe40_ctrl->share_ctrl->vfebase = NULL;
+}
+
+static const struct v4l2_subdev_internal_ops msm_vfe_internal_ops;
+
+static int __devinit vfe40_probe(struct platform_device *pdev)
+{
+	int rc = 0;
+	struct axi_ctrl_t *axi_ctrl;
+	struct vfe40_ctrl_type *vfe40_ctrl;
+	struct vfe_share_ctrl_t *share_ctrl;
+	struct msm_cam_subdev_info sd_info;
+	CDBG("%s: device id = %d\n", __func__, pdev->id);
+
+	share_ctrl = kzalloc(sizeof(struct vfe_share_ctrl_t), GFP_KERNEL);
+	if (!share_ctrl) {
+		pr_err("%s: no enough memory\n", __func__);
+		return -ENOMEM;
+	}
+
+	axi_ctrl = kzalloc(sizeof(struct axi_ctrl_t), GFP_KERNEL);
+	if (!axi_ctrl) {
+		pr_err("%s: no enough memory\n", __func__);
+		kfree(share_ctrl);
+		return -ENOMEM;
+	}
+
+	vfe40_ctrl = kzalloc(sizeof(struct vfe40_ctrl_type), GFP_KERNEL);
+	if (!vfe40_ctrl) {
+		pr_err("%s: no enough memory\n", __func__);
+		kfree(share_ctrl);
+		kfree(axi_ctrl);
+		return -ENOMEM;
+	}
+
+	if (pdev->dev.of_node)
+		of_property_read_u32((&pdev->dev)->of_node,
+			"cell-index", &pdev->id);
+
+	share_ctrl->axi_ctrl = axi_ctrl;
+	share_ctrl->vfe40_ctrl = vfe40_ctrl;
+	axi_ctrl->share_ctrl = share_ctrl;
+	vfe40_ctrl->share_ctrl = share_ctrl;
+	axi_ctrl->pdev = pdev;
+	vfe40_axi_probe(axi_ctrl);
+
+	v4l2_subdev_init(&vfe40_ctrl->subdev, &msm_vfe_subdev_ops);
+	vfe40_ctrl->subdev.internal_ops = &msm_vfe_internal_ops;
+	vfe40_ctrl->subdev.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
+	snprintf(vfe40_ctrl->subdev.name,
+			 sizeof(vfe40_ctrl->subdev.name), "vfe4.0");
+	v4l2_set_subdevdata(&vfe40_ctrl->subdev, vfe40_ctrl);
+	platform_set_drvdata(pdev, &vfe40_ctrl->subdev);
+
+	axi_ctrl->vfemem = platform_get_resource_byname(pdev,
+					IORESOURCE_MEM, "vfe");
+	if (!axi_ctrl->vfemem) {
+		pr_err("%s: no mem resource?\n", __func__);
+		rc = -ENODEV;
+		goto vfe40_no_resource;
+	}
+	axi_ctrl->vfeirq = platform_get_resource_byname(pdev,
+					IORESOURCE_IRQ, "vfe");
+	if (!axi_ctrl->vfeirq) {
+		pr_err("%s: no irq resource?\n", __func__);
+		rc = -ENODEV;
+		goto vfe40_no_resource;
+	}
+
+	axi_ctrl->vfeio = request_mem_region(axi_ctrl->vfemem->start,
+		resource_size(axi_ctrl->vfemem), pdev->name);
+	if (!axi_ctrl->vfeio) {
+		pr_err("%s: no valid mem region\n", __func__);
+		rc = -EBUSY;
+		goto vfe40_no_resource;
+	}
+
+	rc = request_irq(axi_ctrl->vfeirq->start, vfe40_parse_irq,
+		IRQF_TRIGGER_RISING, "vfe", axi_ctrl);
+	if (rc < 0) {
+		release_mem_region(axi_ctrl->vfemem->start,
+			resource_size(axi_ctrl->vfemem));
+		pr_err("%s: irq request fail\n", __func__);
+		rc = -EBUSY;
+		goto vfe40_no_resource;
+	}
+
+	disable_irq(axi_ctrl->vfeirq->start);
+
+	tasklet_init(&axi_ctrl->vfe40_tasklet,
+		axi40_do_tasklet, (unsigned long)axi_ctrl);
+
+	vfe40_ctrl->pdev = pdev;
+	sd_info.sdev_type = VFE_DEV;
+	sd_info.sd_index = pdev->id;
+	sd_info.irq_num = axi_ctrl->vfeirq->start;
+	msm_cam_register_subdev_node(&vfe40_ctrl->subdev, &sd_info);
+	return 0;
+
+vfe40_no_resource:
+	kfree(vfe40_ctrl);
+	kfree(axi_ctrl);
+	kfree(share_ctrl);
+	return rc;
+}
+
+static const struct of_device_id msm_vfe_dt_match[] = {
+	{.compatible = "qcom,vfe40"},
+	{}
+};
+
+MODULE_DEVICE_TABLE(of, msm_vfe_dt_match);
+
+static struct platform_driver vfe40_driver = {
+	.probe = vfe40_probe,
+	.driver = {
+		.name = MSM_VFE_DRV_NAME,
+		.owner = THIS_MODULE,
+		.of_match_table = msm_vfe_dt_match,
+	},
+};
+
+static int __init msm_vfe40_init_module(void)
+{
+	return platform_driver_register(&vfe40_driver);
+}
+
+static void __exit msm_vfe40_exit_module(void)
+{
+	platform_driver_unregister(&vfe40_driver);
+}
+
+module_init(msm_vfe40_init_module);
+module_exit(msm_vfe40_exit_module);
+MODULE_DESCRIPTION("VFE 4.0 driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/video/msm/vfe/msm_vfe40.h b/drivers/media/video/msm/vfe/msm_vfe40.h
new file mode 100644
index 0000000..c8b0cb8
--- /dev/null
+++ b/drivers/media/video/msm/vfe/msm_vfe40.h
@@ -0,0 +1,1202 @@
+/* Copyright (c) 2012, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __MSM_VFE40_H__
+#define __MSM_VFE40_H__
+
+#include <linux/bitops.h>
+#include "msm_vfe_stats_buf.h"
+
+#define TRUE  1
+#define FALSE 0
+
+#define VFE40_HW_NUMBER 0x10000015
+
+/* This defines the total number of registers in the VFE.
+ * Each register is 4 bytes, so multiply this number
+ * by 4 to get the address range. */
+#define VFE40_REGISTER_TOTAL 0x00000320
+
+/* At stop of the VFE pipeline, it is assumed for now
+ * that CAMIF may stop at any time. Bits 1:0 = 0b10:
+ * disable image data capture immediately. */
+#define CAMIF_COMMAND_STOP_IMMEDIATELY  0x00000002
+
+/* At stop of the VFE pipeline, it is assumed for now
+ * that CAMIF may stop at any time. Bits 1:0 = 0b00:
+ * disable image data capture at the frame boundary. */
+#define CAMIF_COMMAND_STOP_AT_FRAME_BOUNDARY  0x00000000
+
+/* to halt axi bridge */
+#define AXI_HALT  0x00000001
+
+/* clear the halt bit. */
+#define AXI_HALT_CLEAR  0x00000000
+
+/* Reset the pipeline when the stop command is issued
+ * (without resetting the registers). Bits 26-31 = 0 for
+ * domain reset; bits 0-9 = 1 for module reset, except
+ * the register module. */
+#define VFE_RESET_UPON_STOP_CMD  0x000003ef
+
+/* Reset the pipeline on the reset command.
+ * Bits 26-31 = 0 for domain reset; bits 0-9 = 1 for module reset. */
+#define VFE_RESET_UPON_RESET_CMD  0x000001ff
+
+/* constants for irq registers */
+#define VFE_DISABLE_ALL_IRQS 0
+/* bit = 1 clears the corresponding bit in VFE_IRQ_STATUS. */
+#define VFE_CLEAR_ALL_IRQ0   0xffff7fff
+#define VFE_CLEAR_ALL_IRQ1   0xffffffff
+
+#define VFE_IRQ_STATUS0_CAMIF_SOF_MASK            (0x00000001<<0)
+#define VFE_IRQ_STATUS0_REG_UPDATE_MASK           (0x00000001<<4)
+#define VFE_IRQ_STATUS0_STATS_BE                  (0x00000001<<16)
+#define VFE_IRQ_STATUS0_STATS_BG                  (0x00000001<<17)
+#define VFE_IRQ_STATUS0_STATS_BF                  (0x00000001<<18)
+#define VFE_IRQ_STATUS0_STATS_AWB                 (0x00000001<<19)
+#define VFE_IRQ_STATUS0_STATS_RS                  (0x00000001<<20)
+#define VFE_IRQ_STATUS0_STATS_CS                  (0x00000001<<21)
+#define VFE_IRQ_STATUS0_STATS_IHIST               (0x00000001<<22)
+#define VFE_IRQ_STATUS0_STATS_SKIN_BHIST          (0x00000001<<23)
+#define VFE_IRQ_STATUS0_IMAGE_COMPOSIT_DONE0_MASK (0x00000001<<25)
+#define VFE_IRQ_STATUS0_IMAGE_COMPOSIT_DONE1_MASK (0x00000001<<26)
+#define VFE_IRQ_STATUS0_IMAGE_COMPOSIT_DONE2_MASK (0x00000001<<27)
+#define VFE_IRQ_STATUS0_IMAGE_COMPOSIT_DONE3_MASK (0x00000001<<28)
+#define VFE_IRQ_STATUS0_STATS_COMPOSIT_MASK_0     (0x00000001<<29)
+#define VFE_IRQ_STATUS0_STATS_COMPOSIT_MASK_1     (0x00000001<<30)
+#define VFE_IRQ_STATUS0_RESET_AXI_HALT_ACK_MASK   (0x00000001<<31)
+
+#define VFE_IRQ_STATUS1_SYNC_TIMER0               (0x00000001<<25)
+#define VFE_IRQ_STATUS1_SYNC_TIMER1               (0x00000001<<26)
+#define VFE_IRQ_STATUS1_SYNC_TIMER2               (0x00000001<<27)
+#define VFE_IRQ_STATUS1_ASYNC_TIMER0              (0x00000001<<28)
+#define VFE_IRQ_STATUS1_ASYNC_TIMER1              (0x00000001<<29)
+#define VFE_IRQ_STATUS1_ASYNC_TIMER2              (0x00000001<<30)
+#define VFE_IRQ_STATUS1_ASYNC_TIMER3              (0x00000001<<31)
+
+/* Interrupt mask used while waiting for the stop ack: the driver
+ * has already requested stop and is waiting for the reset irq and
+ * async timer irqs. For irq_status_0, bits 28-31 are for the async
+ * timers. For irq_status_1, bit 22 is the reset irq and bit 23 is
+ * the axi_halt_ack irq. */
+#define VFE_IMASK_WHILE_STOPPING_0  0x80000000
+#define VFE_IMASK_WHILE_STOPPING_1  0x00000100
+
+/* For ABF, bit 4 is set to zero and all others to 1 */
+#define ABF_MASK 0xFFFFFFF7
+
+/* For DBPC, bit 0 is set to zero and all others to 1 */
+#define DBPC_MASK 0xFFFFFFFE
+
+/* For DBCC, bit 1 is set to zero and all others to 1 */
+#define DBCC_MASK 0xFFFFFFFD
+
+/* For DBPC/ABF/DBCC/ABCC, those bits are set to 1 and all others to 0 */
+#define DEMOSAIC_MASK 0xF
+
+/* For MCE enable, bit 28 is set to zero and all others to 1 */
+#define MCE_EN_MASK 0xEFFFFFFF
+
+/* For MCE Q_K, bits 28 to 31 are set to zero and all others to 1 */
+#define MCE_Q_K_MASK 0x0FFFFFFF
+
+#define BE_ENABLE_MASK    (0x00000001<<5)
+#define BG_ENABLE_MASK    (0x00000001<<6)
+#define BF_ENABLE_MASK    (0x00000001<<7)
+#define AWB_ENABLE_MASK   (0x00000001<<8)
+#define RS_ENABLE_MASK    (0x00000001<<9)
+#define CS_ENABLE_MASK    (0x00000001<<10)
+#define CLF_ENABLE_MASK   (0x00000001<<12)
+#define IHIST_ENABLE_MASK (0x00000001<<15)
+#define RS_CS_ENABLE_MASK (RS_ENABLE_MASK|CS_ENABLE_MASK)
+#define STATS_ENABLE_MASK 0x000487E0   /* bit 18,15,10,9,8,7,6,5*/
+
+#define VFE_DMI_CFG_DEFAULT              0x00000100
+
+#define HFR_MODE_OFF 1
+#define VFE_FRAME_SKIP_PERIOD_MASK 0x0000001F /*bits 0 -4*/
+
+enum VFE40_DMI_RAM_SEL {
+	NO_MEM_SELECTED          = 0,
+	BLACK_LUT_RAM_BANK0      = 0x1,
+	BLACK_LUT_RAM_BANK1      = 0x2,
+	ROLLOFF_RAM0_BANK0       = 0x3,
+	ROLLOFF_RAM0_BANK1       = 0x4,
+	DEMOSAIC_LUT_RAM_BANK0   = 0x5,
+	DEMOSAIC_LUT_RAM_BANK1   = 0x6,
+	STATS_BHIST_RAM0         = 0x7,
+	STATS_BHIST_RAM1         = 0x8,
+	RGBLUT_RAM_CH0_BANK0     = 0x9,
+	RGBLUT_RAM_CH0_BANK1     = 0xa,
+	RGBLUT_RAM_CH1_BANK0     = 0xb,
+	RGBLUT_RAM_CH1_BANK1     = 0xc,
+	RGBLUT_RAM_CH2_BANK0     = 0xd,
+	RGBLUT_RAM_CH2_BANK1     = 0xe,
+	RGBLUT_CHX_BANK0         = 0xf,
+	RGBLUT_CHX_BANK1         = 0x10,
+	STATS_IHIST_RAM          = 0x11,
+	LUMA_ADAPT_LUT_RAM_BANK0 = 0x12,
+	LUMA_ADAPT_LUT_RAM_BANK1 = 0x13,
+};
+
+enum vfe_output_state {
+	VFE_STATE_IDLE,
+	VFE_STATE_START_REQUESTED,
+	VFE_STATE_STARTED,
+	VFE_STATE_STOP_REQUESTED,
+	VFE_STATE_STOPPED,
+};
+
+#define V40_CAMIF_OFF             0x000002F8
+#define V40_CAMIF_LEN             36
+
+#define V40_DEMUX_OFF             0x00000424
+#define V40_DEMUX_LEN             28
+
+#define V40_DEMOSAICV3_0_OFF      0x00000440
+#define V40_DEMOSAICV3_0_LEN      4
+#define V40_DEMOSAICV3_1_OFF      0x00000518
+#define V40_DEMOSAICV3_1_LEN      88
+#define V40_DEMOSAICV3_2_OFF      0x00000568
+#define V40_DEMOSAICV3_UP_REG_CNT 5
+
+#define V40_OUT_CLAMP_OFF         0x00000874
+#define V40_OUT_CLAMP_LEN         16
+
+#define V40_OPERATION_CFG_LEN     44
+
+#define V40_AXI_OUT_OFF           0x0000004C
+#define V40_AXI_OUT_LEN           412
+#define V40_AXI_CH_INF_LEN        32
+#define V40_AXI_CFG_LEN           71
+
+#define V40_FOV_ENC_OFF           0x00000854
+#define V40_FOV_ENC_LEN           16
+#define V40_FOV_VIEW_OFF          0x00000864
+#define V40_FOV_VIEW_LEN          16
+
+#define V40_SCALER_ENC_OFF 0x0000075C
+#define V40_SCALER_ENC_LEN 72
+
+#define V40_SCALER_VIEW_OFF 0x000007A4
+#define V40_SCALER_VIEW_LEN 72
+
+#define V40_COLORXFORM_ENC_CFG_OFF 0x0000071C
+#define V40_COLORXFORM_ENC_CFG_LEN 32
+
+#define V40_COLORXFORM_VIEW_CFG_OFF 0x0000073C
+#define V40_COLORXFORM_VIEW_CFG_LEN 32
+
+#define V40_CHROMA_EN_OFF 0x00000640
+#define V40_CHROMA_EN_LEN 36
+
+#define V40_SYNC_TIMER_OFF      0x00000324
+#define V40_SYNC_TIMER_POLARITY_OFF 0x0000034C
+#define V40_TIMER_SELECT_OFF        0x00000374
+#define V40_SYNC_TIMER_LEN 28
+
+#define V40_ASYNC_TIMER_OFF 0x00000350
+#define V40_ASYNC_TIMER_LEN 28
+
+/* use 10x13 mesh table in vfe40 */
+#define V40_MESH_ROLL_OFF_CFG_OFF             0x00000400
+#define V40_MESH_ROLL_OFF_CFG_LEN             36
+#define V40_MESH_ROLL_OFF_TABLE_SIZE          130
+
+
+#define V40_COLOR_COR_OFF 0x000005D0
+#define V40_COLOR_COR_LEN 52
+
+#define V40_WB_OFF 0x00000580
+#define V40_WB_LEN 4
+
+#define V40_RGB_G_OFF 0x00000638
+#define V40_RGB_G_LEN 4
+#define V40_GAMMA_LUT_BANK_SEL_MASK           0x00000007
+
+#define V40_LA_OFF 0x0000063C
+#define V40_LA_LEN 4
+
+#define V40_SCE_OFF 0x00000694
+#define V40_SCE_LEN 136
+
+#define V40_CHROMA_SUP_OFF 0x00000664
+#define V40_CHROMA_SUP_LEN 12
+
+#define V40_MCE_OFF 0x00000670
+#define V40_MCE_LEN 36
+
+#define V40_STATS_BE_OFF 0x0000088C
+#define V40_STATS_BE_LEN 12
+
+#define V40_STATS_BG_OFF 0x00000898
+#define V40_STATS_BG_LEN 12
+
+#define V40_STATS_BF_OFF 0x000008A4
+#define V40_STATS_BF_LEN 24
+
+#define V40_STATS_BHIST_OFF 0x000008BC
+#define V40_STATS_BHIST_LEN 8
+
+#define V40_STATS_AWB_OFF 0x000008C4
+#define V40_STATS_AWB_LEN 32
+
+#define V40_STATS_RS_OFF 0x000008E4
+#define V40_STATS_RS_LEN 8
+
+#define V40_STATS_CS_OFF 0x000008EC
+#define V40_STATS_CS_LEN 8
+
+#define V40_STATS_IHIST_OFF 0x000008F4
+#define V40_STATS_IHIST_LEN 8
+
+#define V40_STATS_SKIN_OFF 0x000008FC
+#define V40_STATS_SKIN_LEN 20
+
+#define V40_ASF_OFF 0x000007EC
+#define V40_ASF_LEN 48
+#define V40_ASF_UPDATE_LEN 36
+
+#define V40_CAPTURE_LEN 4
+
+#define V40_GET_HW_VERSION_OFF 0
+#define V40_GET_HW_VERSION_LEN 4
+
+#define V40_LINEARIZATION_OFF1 0x0000037C
+#define V40_LINEARIZATION_LEN1 68
+
+#define V40_DEMOSAICV3_DBPC_CFG_OFF  0x00000444
+#define V40_DEMOSAICV3_DBPC_LEN 4
+
+#define V40_DEMOSAICV3_DBPC_CFG_OFF0 0x00000448
+#define V40_DEMOSAICV3_DBPC_CFG_OFF1 0x0000044C
+#define V40_DEMOSAICV3_DBPC_CFG_OFF2 0x00000450
+
+#define V40_DEMOSAICV3_DBCC_OFF 0x00000454
+#define V40_DEMOSAICV3_DBCC_LEN 16
+
+#define V40_DEMOSAICV3_ABF_OFF 0x00000464
+#define V40_DEMOSAICV3_ABF_LEN 180
+
+#define V40_MODULE_CFG_OFF 0x00000018
+#define V40_MODULE_CFG_LEN 4
+
+#define V40_ASF_SPECIAL_EFX_CFG_OFF 0x0000081C
+#define V40_ASF_SPECIAL_EFX_CFG_LEN 4
+
+#define V40_CLF_CFG_OFF 0x00000588
+#define V40_CLF_CFG_LEN 72
+
+#define V40_CLF_LUMA_UPDATE_OFF 0x0000058C
+#define V40_CLF_LUMA_UPDATE_LEN 60
+
+#define V40_CLF_CHROMA_UPDATE_OFF 0x000005C8
+#define V40_CLF_CHROMA_UPDATE_LEN 8
+
+#define VFE40_GAMMA_NUM_ENTRIES  64
+
+#define VFE40_LA_TABLE_LENGTH    64
+
+#define VFE40_LINEARIZATON_TABLE_LENGTH    36
+
+#define VFE_WM_CFG_BASE 0x0070
+#define VFE_WM_CFG_LEN 0x0024
+
+#define vfe40_get_ch_ping_addr(base, chn) \
+	(msm_camera_io_r((base) + VFE_WM_CFG_BASE + VFE_WM_CFG_LEN * (chn)))
+#define vfe40_get_ch_pong_addr(base, chn) \
+	(msm_camera_io_r((base) + VFE_WM_CFG_BASE + VFE_WM_CFG_LEN * (chn) + 4))
+#define vfe40_get_ch_addr(ping_pong, base, chn) \
+	((((ping_pong) & (1 << (chn))) == 0) ? \
+	(vfe40_get_ch_pong_addr((base), chn)) : \
+	(vfe40_get_ch_ping_addr((base), chn)))
+
+#define vfe40_put_ch_ping_addr(base, chn, addr) \
+	(msm_camera_io_w((addr), \
+	(base) + VFE_WM_CFG_BASE + VFE_WM_CFG_LEN * (chn)))
+#define vfe40_put_ch_pong_addr(base, chn, addr) \
+	(msm_camera_io_w((addr), \
+	(base) + VFE_WM_CFG_BASE + VFE_WM_CFG_LEN * (chn) + 4))
+#define vfe40_put_ch_addr(ping_pong, base, chn, addr) \
+	(((ping_pong) & (1 << (chn))) == 0 ?   \
+	vfe40_put_ch_pong_addr((base), (chn), (addr)) : \
+	vfe40_put_ch_ping_addr((base), (chn), (addr)))
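+
+/*
+ * Illustrative use of the ping/pong helpers above (a sketch, assuming
+ * a mapped vfebase and a write master index wm; these variable names
+ * are not part of this driver):
+ *
+ *	ping_pong = msm_camera_io_r(vfebase + VFE_BUS_PING_PONG_STATUS);
+ *	addr = vfe40_get_ch_addr(ping_pong, vfebase, wm);
+ *
+ * Bit <wm> of the ping/pong status selects which of the two buffer
+ * address registers of that write master is read or reprogrammed.
+ */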
+
+struct vfe_cmd_hw_version {
+	uint32_t minorVersion;
+	uint32_t majorVersion;
+	uint32_t coreVersion;
+};
+
+enum VFE_AXI_OUTPUT_MODE {
+	VFE_AXI_OUTPUT_MODE_Output1,
+	VFE_AXI_OUTPUT_MODE_Output2,
+	VFE_AXI_OUTPUT_MODE_Output1AndOutput2,
+	VFE_AXI_OUTPUT_MODE_CAMIFToAXIViaOutput2,
+	VFE_AXI_OUTPUT_MODE_Output2AndCAMIFToAXIViaOutput1,
+	VFE_AXI_OUTPUT_MODE_Output1AndCAMIFToAXIViaOutput2,
+	VFE_AXI_LAST_OUTPUT_MODE_ENUM
+};
+
+enum VFE_RAW_WR_PATH_SEL {
+	VFE_RAW_OUTPUT_DISABLED,
+	VFE_RAW_OUTPUT_ENC_CBCR_PATH,
+	VFE_RAW_OUTPUT_VIEW_CBCR_PATH,
+	VFE_RAW_OUTPUT_PATH_INVALID
+};
+
+
+#define VFE_AXI_OUTPUT_BURST_LENGTH     4
+#define VFE_MAX_NUM_FRAGMENTS_PER_FRAME 4
+#define VFE_AXI_OUTPUT_CFG_FRAME_COUNT  3
+
+struct vfe_cmds_per_write_master {
+	uint16_t imageWidth;
+	uint16_t imageHeight;
+	uint16_t outRowCount;
+	uint16_t outRowIncrement;
+	uint32_t outFragments[VFE_AXI_OUTPUT_CFG_FRAME_COUNT]
+		[VFE_MAX_NUM_FRAGMENTS_PER_FRAME];
+};
+
+struct vfe_cmds_axi_per_output_path {
+	uint8_t fragmentCount;
+	struct vfe_cmds_per_write_master firstWM;
+	struct vfe_cmds_per_write_master secondWM;
+};
+
+enum VFE_AXI_BURST_LENGTH {
+	VFE_AXI_BURST_LENGTH_IS_2  = 2,
+	VFE_AXI_BURST_LENGTH_IS_4  = 4,
+	VFE_AXI_BURST_LENGTH_IS_8  = 8,
+	VFE_AXI_BURST_LENGTH_IS_16 = 16
+};
+
+
+struct vfe_cmd_fov_crop_config {
+	uint8_t enable;
+	uint16_t firstPixel;
+	uint16_t lastPixel;
+	uint16_t firstLine;
+	uint16_t lastLine;
+};
+
+struct vfe_cmds_main_scaler_stripe_init {
+	uint16_t MNCounterInit;
+	uint16_t phaseInit;
+};
+
+struct vfe_cmds_scaler_one_dimension {
+	uint8_t  enable;
+	uint16_t inputSize;
+	uint16_t outputSize;
+	uint32_t phaseMultiplicationFactor;
+	uint8_t  interpolationResolution;
+};
+
+struct vfe_cmd_main_scaler_config {
+	uint8_t enable;
+	struct vfe_cmds_scaler_one_dimension    hconfig;
+	struct vfe_cmds_scaler_one_dimension    vconfig;
+	struct vfe_cmds_main_scaler_stripe_init MNInitH;
+	struct vfe_cmds_main_scaler_stripe_init MNInitV;
+};
+
+struct vfe_cmd_scaler2_config {
+	uint8_t enable;
+	struct vfe_cmds_scaler_one_dimension hconfig;
+	struct vfe_cmds_scaler_one_dimension vconfig;
+};
+
+
+struct vfe_cmd_frame_skip_update {
+	uint32_t output1Pattern;
+	uint32_t output2Pattern;
+};
+
+struct vfe_cmd_output_clamp_config {
+	uint8_t minCh0;
+	uint8_t minCh1;
+	uint8_t minCh2;
+	uint8_t maxCh0;
+	uint8_t maxCh1;
+	uint8_t maxCh2;
+};
+
+struct vfe_cmd_chroma_subsample_config {
+	uint8_t enable;
+	uint8_t cropEnable;
+	uint8_t vsubSampleEnable;
+	uint8_t hsubSampleEnable;
+	uint8_t vCosited;
+	uint8_t hCosited;
+	uint8_t vCositedPhase;
+	uint8_t hCositedPhase;
+	uint16_t cropWidthFirstPixel;
+	uint16_t cropWidthLastPixel;
+	uint16_t cropHeightFirstLine;
+	uint16_t cropHeightLastLine;
+};
+
+enum VFE_START_PIXEL_PATTERN {
+	VFE_BAYER_RGRGRG,
+	VFE_BAYER_GRGRGR,
+	VFE_BAYER_BGBGBG,
+	VFE_BAYER_GBGBGB,
+	VFE_YUV_YCbYCr,
+	VFE_YUV_YCrYCb,
+	VFE_YUV_CbYCrY,
+	VFE_YUV_CrYCbY
+};
+
+enum VFE_BUS_RD_INPUT_PIXEL_PATTERN {
+	VFE_BAYER_RAW,
+	VFE_YUV_INTERLEAVED,
+	VFE_YUV_PSEUDO_PLANAR_Y,
+	VFE_YUV_PSEUDO_PLANAR_CBCR
+};
+
+enum VFE_YUV_INPUT_COSITING_MODE {
+	VFE_YUV_COSITED,
+	VFE_YUV_INTERPOLATED
+};
+
+struct vfe_cmds_demosaic_abf {
+	uint8_t   enable;
+	uint8_t   forceOn;
+	uint8_t   shift;
+	uint16_t  lpThreshold;
+	uint16_t  max;
+	uint16_t  min;
+	uint8_t   ratio;
+};
+
+struct vfe_cmds_demosaic_bpc {
+	uint8_t   enable;
+	uint16_t  fmaxThreshold;
+	uint16_t  fminThreshold;
+	uint16_t  redDiffThreshold;
+	uint16_t  blueDiffThreshold;
+	uint16_t  greenDiffThreshold;
+};
+
+struct vfe_cmd_demosaic_config {
+	uint8_t   enable;
+	uint8_t   slopeShift;
+	struct vfe_cmds_demosaic_abf abfConfig;
+	struct vfe_cmds_demosaic_bpc bpcConfig;
+};
+
+struct vfe_cmd_demosaic_bpc_update {
+	struct vfe_cmds_demosaic_bpc bpcUpdate;
+};
+
+struct vfe_cmd_demosaic_abf_update {
+	struct vfe_cmds_demosaic_abf abfUpdate;
+};
+
+struct vfe_cmd_white_balance_config {
+	uint8_t  enable;
+	uint16_t ch2Gain;
+	uint16_t ch1Gain;
+	uint16_t ch0Gain;
+};
+
+enum VFE_COLOR_CORRECTION_COEF_QFACTOR {
+	COEF_IS_Q7_SIGNED,
+	COEF_IS_Q8_SIGNED,
+	COEF_IS_Q9_SIGNED,
+	COEF_IS_Q10_SIGNED
+};
+
+struct vfe_cmd_color_correction_config {
+	uint8_t     enable;
+	enum VFE_COLOR_CORRECTION_COEF_QFACTOR coefQFactor;
+	int16_t  C0;
+	int16_t  C1;
+	int16_t  C2;
+	int16_t  C3;
+	int16_t  C4;
+	int16_t  C5;
+	int16_t  C6;
+	int16_t  C7;
+	int16_t  C8;
+	int16_t  K0;
+	int16_t  K1;
+	int16_t  K2;
+};
+
+#define VFE_LA_TABLE_LENGTH 64
+
+struct vfe_cmd_la_config {
+	uint8_t enable;
+	int16_t table[VFE_LA_TABLE_LENGTH];
+};
+
+#define VFE_GAMMA_TABLE_LENGTH 256
+enum VFE_RGB_GAMMA_TABLE_SELECT {
+	RGB_GAMMA_CH0_SELECTED,
+	RGB_GAMMA_CH1_SELECTED,
+	RGB_GAMMA_CH2_SELECTED,
+	RGB_GAMMA_CH0_CH1_SELECTED,
+	RGB_GAMMA_CH0_CH2_SELECTED,
+	RGB_GAMMA_CH1_CH2_SELECTED,
+	RGB_GAMMA_CH0_CH1_CH2_SELECTED
+};
+
+struct vfe_cmd_rgb_gamma_config {
+	uint8_t enable;
+	enum VFE_RGB_GAMMA_TABLE_SELECT channelSelect;
+	int16_t table[VFE_GAMMA_TABLE_LENGTH];
+};
+
+struct vfe_cmd_chroma_enhan_config {
+	uint8_t  enable;
+	int16_t am;
+	int16_t ap;
+	int16_t bm;
+	int16_t bp;
+	int16_t cm;
+	int16_t cp;
+	int16_t dm;
+	int16_t dp;
+	int16_t kcr;
+	int16_t kcb;
+	int16_t RGBtoYConversionV0;
+	int16_t RGBtoYConversionV1;
+	int16_t RGBtoYConversionV2;
+	uint8_t RGBtoYConversionOffset;
+};
+
+struct vfe_cmd_chroma_suppression_config {
+	uint8_t enable;
+	uint8_t m1;
+	uint8_t m3;
+	uint8_t n1;
+	uint8_t n3;
+	uint8_t nn1;
+	uint8_t mm1;
+};
+
+struct vfe_cmd_asf_config {
+	uint8_t enable;
+	uint8_t smoothFilterEnabled;
+	uint8_t sharpMode;
+	uint8_t smoothCoefCenter;
+	uint8_t smoothCoefSurr;
+	uint8_t normalizeFactor;
+	uint8_t sharpK1;
+	uint8_t sharpK2;
+	uint8_t sharpThreshE1;
+	int8_t sharpThreshE2;
+	int8_t sharpThreshE3;
+	int8_t sharpThreshE4;
+	int8_t sharpThreshE5;
+	int8_t filter1Coefficients[9];
+	int8_t filter2Coefficients[9];
+	uint8_t  cropEnable;
+	uint16_t cropFirstPixel;
+	uint16_t cropLastPixel;
+	uint16_t cropFirstLine;
+	uint16_t cropLastLine;
+};
+
+struct vfe_cmd_asf_update {
+	uint8_t enable;
+	uint8_t smoothFilterEnabled;
+	uint8_t sharpMode;
+	uint8_t smoothCoefCenter;
+	uint8_t smoothCoefSurr;
+	uint8_t normalizeFactor;
+	uint8_t sharpK1;
+	uint8_t sharpK2;
+	uint8_t sharpThreshE1;
+	int8_t  sharpThreshE2;
+	int8_t  sharpThreshE3;
+	int8_t  sharpThreshE4;
+	int8_t  sharpThreshE5;
+	int8_t  filter1Coefficients[9];
+	int8_t  filter2Coefficients[9];
+	uint8_t cropEnable;
+};
+
+enum VFE_TEST_GEN_SYNC_EDGE {
+	VFE_TEST_GEN_SYNC_EDGE_ActiveHigh,
+	VFE_TEST_GEN_SYNC_EDGE_ActiveLow
+};
+
+
+struct vfe_cmd_bus_pm_start {
+	uint8_t output2YWrPmEnable;
+	uint8_t output2CbcrWrPmEnable;
+	uint8_t output1YWrPmEnable;
+	uint8_t output1CbcrWrPmEnable;
+};
+
+struct  vfe_frame_skip_counts {
+	uint32_t  totalFrameCount;
+	uint32_t  output1Count;
+	uint32_t  output2Count;
+};
+
+enum VFE_AXI_RD_UNPACK_HBI_SEL {
+	VFE_AXI_RD_HBI_32_CLOCK_CYCLES,
+	VFE_AXI_RD_HBI_64_CLOCK_CYCLES,
+	VFE_AXI_RD_HBI_128_CLOCK_CYCLES,
+	VFE_AXI_RD_HBI_256_CLOCK_CYCLES,
+	VFE_AXI_RD_HBI_512_CLOCK_CYCLES,
+	VFE_AXI_RD_HBI_1024_CLOCK_CYCLES,
+	VFE_AXI_RD_HBI_2048_CLOCK_CYCLES,
+	VFE_AXI_RD_HBI_4096_CLOCK_CYCLES
+};
+
+struct vfe_frame_bpc_info {
+	uint32_t greenDefectPixelCount;
+	uint32_t redBlueDefectPixelCount;
+};
+
+struct vfe_frame_asf_info {
+	uint32_t  asfMaxEdge;
+	uint32_t  asfHbiCount;
+};
+
+struct vfe_msg_camif_status {
+	uint8_t  camifState;
+	uint32_t pixelCount;
+	uint32_t lineCount;
+};
+
+struct vfe40_irq_status {
+	uint32_t vfeIrqStatus0;
+	uint32_t vfeIrqStatus1;
+	uint32_t camifStatus;
+	uint32_t demosaicStatus;
+	uint32_t asfMaxEdge;
+};
+
+#define V40_PREVIEW_AXI_FLAG  0x00000001
+#define V40_SNAPSHOT_AXI_FLAG (0x00000001<<1)
+
+struct vfe40_cmd_type {
+	uint16_t id;
+	uint32_t length;
+	uint32_t offset;
+	uint32_t flag;
+};
+
+struct vfe40_free_buf {
+	struct list_head node;
+	uint32_t paddr;
+	uint32_t y_off;
+	uint32_t cbcr_off;
+};
+
+struct vfe40_output_ch {
+	struct list_head free_buf_queue;
+	spinlock_t free_buf_lock;
+	uint16_t image_mode;
+	int8_t ch0;
+	int8_t ch1;
+	int8_t ch2;
+	uint32_t  capture_cnt;
+	uint32_t  frame_drop_cnt;
+	struct msm_free_buf ping;
+	struct msm_free_buf pong;
+	struct msm_free_buf free_buf;
+};
+
+/* no error irq in mask 0 */
+#define VFE40_IMASK_ERROR_ONLY_0  0x0
+/* In the normal case, do not block the error status. */
+/* bits 0-21 are error irq bits */
+#define VFE40_IMASK_ERROR_ONLY_1               0x005FFFFF
+#define VFE40_IMASK_CAMIF_ERROR               (0x00000001<<0)
+#define VFE40_IMASK_BHIST_OVWR                (0x00000001<<1)
+#define VFE40_IMASK_STATS_CS_OVWR             (0x00000001<<2)
+#define VFE40_IMASK_STATS_IHIST_OVWR          (0x00000001<<3)
+#define VFE40_IMASK_REALIGN_BUF_Y_OVFL        (0x00000001<<4)
+#define VFE40_IMASK_REALIGN_BUF_CB_OVFL       (0x00000001<<5)
+#define VFE40_IMASK_REALIGN_BUF_CR_OVFL       (0x00000001<<6)
+#define VFE40_IMASK_VIOLATION                 (0x00000001<<7)
+#define VFE40_IMASK_IMG_MAST_0_BUS_OVFL       (0x00000001<<8)
+#define VFE40_IMASK_IMG_MAST_1_BUS_OVFL       (0x00000001<<9)
+#define VFE40_IMASK_IMG_MAST_2_BUS_OVFL       (0x00000001<<10)
+#define VFE40_IMASK_IMG_MAST_3_BUS_OVFL       (0x00000001<<11)
+#define VFE40_IMASK_IMG_MAST_4_BUS_OVFL       (0x00000001<<12)
+#define VFE40_IMASK_IMG_MAST_5_BUS_OVFL       (0x00000001<<13)
+#define VFE40_IMASK_IMG_MAST_6_BUS_OVFL       (0x00000001<<14)
+#define VFE40_IMASK_STATS_AE_BG_BUS_OVFL      (0x00000001<<15)
+#define VFE40_IMASK_STATS_AF_BF_BUS_OVFL      (0x00000001<<16)
+#define VFE40_IMASK_STATS_AWB_BUS_OVFL        (0x00000001<<17)
+#define VFE40_IMASK_STATS_RS_BUS_OVFL         (0x00000001<<18)
+#define VFE40_IMASK_STATS_CS_BUS_OVFL         (0x00000001<<19)
+#define VFE40_IMASK_STATS_IHIST_BUS_OVFL      (0x00000001<<20)
+#define VFE40_IMASK_STATS_SKIN_BHIST_BUS_OVFL (0x00000001<<21)
+#define VFE40_IMASK_AXI_ERROR                 (0x00000001<<22)
+
+#define VFE_COM_STATUS 0x000FE000
+
+struct vfe40_output_path {
+	uint16_t output_mode;     /* bitmask  */
+
+	struct vfe40_output_ch out0; /* preview and thumbnail */
+	struct vfe40_output_ch out1; /* snapshot */
+	struct vfe40_output_ch out2; /* video    */
+};
+
+struct vfe40_frame_extra {
+	uint32_t greenDefectPixelCount;
+	uint32_t redBlueDefectPixelCount;
+
+	uint32_t  asfMaxEdge;
+	uint32_t  asfHbiCount;
+
+	uint32_t yWrPmStats0;
+	uint32_t yWrPmStats1;
+	uint32_t cbcrWrPmStats0;
+	uint32_t cbcrWrPmStats1;
+
+	uint32_t  frameCounter;
+};
+
+#define VFE_CLEAR_ALL_IRQS              0xffffffff
+
+#define VFE_HW_VERSION			        0x00000000
+#define VFE_GLOBAL_RESET                0x0000000C
+#define VFE_MODULE_RESET                0x00000010
+#define VFE_CGC_OVERRIDE                0x00000014
+#define VFE_MODULE_CFG                  0x00000018
+#define VFE_CFG				            0x0000001C
+#define VFE_IRQ_CMD                     0x00000024
+#define VFE_IRQ_MASK_0                  0x00000028
+#define VFE_IRQ_MASK_1                  0x0000002C
+#define VFE_IRQ_CLEAR_0                 0x00000030
+#define VFE_IRQ_CLEAR_1                 0x00000034
+#define VFE_IRQ_STATUS_0                0x00000038
+#define VFE_IRQ_STATUS_1                0x0000003C
+#define VFE_IRQ_COMP_MASK               0x00000040
+#define VFE_BUS_CMD                     0x0000004C
+#define VFE_BUS_PING_PONG_STATUS        0x00000180
+#define VFE_AXI_CMD                     0x000001D8
+#define VFE_AXI_STATUS        0x000002C0
+#define VFE_BUS_STATS_PING_PONG_BASE    0x000000F4
+
+#define VFE_BUS_STATS_AEC_WR_PING_ADDR    0x000000F4
+#define VFE_BUS_STATS_AEC_WR_PONG_ADDR    0x000000F8
+#define VFE_BUS_STATS_AEC_UB_CFG          0x000000FC
+#define VFE_BUS_STATS_AF_WR_PING_ADDR     0x00000100
+#define VFE_BUS_STATS_AF_WR_PONG_ADDR     0x00000104
+#define VFE_BUS_STATS_AF_UB_CFG           0x00000108
+#define VFE_BUS_STATS_AWB_WR_PING_ADDR    0x0000010C
+#define VFE_BUS_STATS_AWB_WR_PONG_ADDR    0x00000110
+#define VFE_BUS_STATS_AWB_UB_CFG          0x00000114
+#define VFE_BUS_STATS_RS_WR_PING_ADDR    0x00000118
+#define VFE_BUS_STATS_RS_WR_PONG_ADDR    0x0000011C
+#define VFE_BUS_STATS_RS_UB_CFG          0x00000120
+#define VFE_BUS_STATS_CS_WR_PING_ADDR    0x00000124
+#define VFE_BUS_STATS_CS_WR_PONG_ADDR    0x00000128
+#define VFE_BUS_STATS_CS_UB_CFG          0x0000012C
+#define VFE_BUS_STATS_HIST_WR_PING_ADDR   0x00000130
+#define VFE_BUS_STATS_HIST_WR_PONG_ADDR   0x00000134
+#define VFE_BUS_STATS_HIST_UB_CFG          0x00000138
+#define VFE_BUS_STATS_SKIN_WR_PING_ADDR    0x0000013C
+#define VFE_BUS_STATS_SKIN_WR_PONG_ADDR    0x00000140
+#define VFE_BUS_STATS_SKIN_UB_CFG          0x00000144
+
+#define VFE_0_BUS_BDG_QOS_CFG_0     0x000002C4
+#define VFE_0_BUS_BDG_QOS_CFG_1     0x000002C8
+#define VFE_0_BUS_BDG_QOS_CFG_2     0x000002CC
+#define VFE_0_BUS_BDG_QOS_CFG_3     0x000002D0
+#define VFE_0_BUS_BDG_QOS_CFG_4     0x000002D4
+#define VFE_0_BUS_BDG_QOS_CFG_5     0x000002D8
+#define VFE_0_BUS_BDG_QOS_CFG_6     0x000002DC
+#define VFE_0_BUS_BDG_QOS_CFG_7     0x000002E0
+
+#define VFE_CAMIF_COMMAND               0x000002F4
+#define VFE_CAMIF_STATUS                0x0000031C
+#define VFE_REG_UPDATE_CMD              0x00000378
+#define VFE_DEMUX_GAIN_0                0x00000428
+#define VFE_DEMUX_GAIN_1                0x0000042C
+#define VFE_CHROMA_UP                   0x0000057C
+
+#define VFE_CLAMP_ENC_MAX               0x00000874
+#define VFE_CLAMP_ENC_MIN               0x00000878
+#define VFE_CLAMP_VIEW_MAX              0x0000087C
+#define VFE_CLAMP_VIEW_MIN              0x00000880
+
+#define VFE_REALIGN_BUF                 0x00000884
+#define VFE_STATS_CFG                   0x00000888
+#define VFE_STATS_AWB_SGW_CFG           0x000008CC
+#define VFE_DMI_CFG                     0x00000910
+#define VFE_DMI_ADDR                    0x00000914
+#define VFE_DMI_DATA_LO                 0x0000091C
+#define VFE_BUS_IO_FORMAT_CFG           0x00000054
+#define VFE_RDI0_CFG                    0x000002E8
+#define VFE_RDI1_CFG                    0x000002EC
+#define VFE_RDI2_CFG                    0x000002F0
+
+#define VFE_VIOLATION_STATUS            0x000007B4
+
+#define VFE40_DMI_DATA_HI               0x00000918
+#define VFE40_DMI_DATA_LO               0x0000091C
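+
+/*
+ * Rough DMI access sketch (an assumption based on the register names
+ * above, not code taken from this driver): a table transfer typically
+ * selects a RAM bank from enum VFE40_DMI_RAM_SEL via VFE_DMI_CFG,
+ * programs the start address in VFE_DMI_ADDR, then streams entries
+ * through VFE40_DMI_DATA_LO (and VFE40_DMI_DATA_HI for wide tables).
+ */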
+
+#define VFE40_OUTPUT_MODE_PT			BIT(0)
+#define VFE40_OUTPUT_MODE_S			BIT(1)
+#define VFE40_OUTPUT_MODE_V			BIT(2)
+#define VFE40_OUTPUT_MODE_P			BIT(3)
+#define VFE40_OUTPUT_MODE_T			BIT(4)
+#define VFE40_OUTPUT_MODE_P_ALL_CHNLS		BIT(5)
+#define VFE40_OUTPUT_MODE_PRIMARY		BIT(6)
+#define VFE40_OUTPUT_MODE_PRIMARY_ALL_CHNLS	BIT(7)
+#define VFE40_OUTPUT_MODE_SECONDARY		BIT(8)
+#define VFE40_OUTPUT_MODE_SECONDARY_ALL_CHNLS	BIT(9)
+
+struct vfe_stats_control {
+	uint32_t droppedStatsFrameCount;
+	uint32_t bufToRender;
+};
+struct axi_ctrl_t;
+struct vfe40_ctrl_type;
+
+struct vfe_share_ctrl_t {
+	void __iomem *vfebase;
+	uint32_t register_total;
+
+	atomic_t vstate;
+	uint32_t vfeFrameId;
+	uint32_t stats_comp;
+	spinlock_t  stop_flag_lock;
+	int8_t stop_ack_pending;
+	enum vfe_output_state liveshot_state;
+	uint32_t vfe_capture_count;
+
+	uint16_t operation_mode;     /* streaming or snapshot */
+	struct vfe40_output_path outpath;
+
+	uint32_t ref_count;
+	spinlock_t  sd_notify_lock;
+	uint32_t vfe_clk_rate;
+
+	atomic_t irq_cnt;
+	struct axi_ctrl_t *axi_ctrl;
+	struct vfe40_ctrl_type *vfe40_ctrl;
+};
+
+struct axi_ctrl_t {
+	struct v4l2_subdev subdev;
+	struct platform_device *pdev;
+	struct resource *vfeirq;
+	spinlock_t  tasklet_lock;
+	struct list_head tasklet_q;
+
+	void *syncdata;
+
+	struct resource	*vfemem;
+	struct resource *vfeio;
+	struct regulator *fs_vfe;
+	struct clk *vfe_clk[3];
+	struct tasklet_struct vfe40_tasklet;
+	struct vfe_share_ctrl_t *share_ctrl;
+};
+
+struct vfe40_ctrl_type {
+	uint32_t vfeImaskCompositePacked;
+
+	spinlock_t  update_ack_lock;
+	spinlock_t  state_lock;
+	spinlock_t  io_lock;
+	spinlock_t  stats_bufq_lock;
+	uint32_t extlen;
+	void *extdata;
+
+	int8_t start_ack_pending;
+	int8_t reset_ack_pending;
+	int8_t update_ack_pending;
+	enum vfe_output_state recording_state;
+	int8_t update_linear;
+	int8_t update_rolloff;
+	int8_t update_la;
+	int8_t update_gamma;
+
+	struct vfe_share_ctrl_t *share_ctrl;
+
+	uint32_t sync_timer_repeat_count;
+	uint32_t sync_timer_state;
+	uint32_t sync_timer_number;
+
+	uint32_t output1Pattern;
+	uint32_t output1Period;
+	uint32_t output2Pattern;
+	uint32_t output2Period;
+	uint32_t vfeFrameSkipCount;
+	uint32_t vfeFrameSkipPeriod;
+	struct vfe_stats_control afStatsControl;
+	struct vfe_stats_control awbStatsControl;
+	struct vfe_stats_control aecStatsControl;
+	struct vfe_stats_control ihistStatsControl;
+	struct vfe_stats_control rsStatsControl;
+	struct vfe_stats_control csStatsControl;
+
+	/* v4l2 subdev */
+	struct v4l2_subdev subdev;
+	struct platform_device *pdev;
+	uint32_t hfr_mode;
+	uint32_t frame_skip_cnt;
+	uint32_t frame_skip_pattern;
+	uint32_t snapshot_frame_cnt;
+	struct msm_stats_bufq_ctrl stats_ctrl;
+	struct msm_stats_ops stats_ops;
+};
+
+#define statsAeNum      0
+#define statsAfNum      1
+#define statsAwbNum     2
+#define statsRsNum      3
+#define statsCsNum      4
+#define statsIhistNum   5
+#define statsSkinNum    6
+
+struct vfe_cmd_stats_ack {
+	uint32_t  nextStatsBuf;
+};
+
+#define VFE_STATS_BUFFER_COUNT            3
+
+struct vfe_cmd_stats_buf {
+	uint32_t statsBuf[VFE_STATS_BUFFER_COUNT];
+};
+
+void vfe40_subdev_notify(int id, int path, int image_mode,
+	struct v4l2_subdev *sd, struct vfe_share_ctrl_t *share_ctrl);
+struct vfe40_output_ch *vfe40_get_ch(
+	int path, struct vfe_share_ctrl_t *share_ctrl);
+void vfe40_send_isp_msg(struct v4l2_subdev *sd,
+	uint32_t vfeFrameId, uint32_t isp_msg_id);
+void vfe40_axi_probe(struct axi_ctrl_t *axi_ctrl);
+
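+/*
+ * Per-write-master config register offsets, indexed by the channel
+ * numbers stored in vfe40_output_ch (ch0/ch1/ch2); entries are spaced
+ * VFE_WM_CFG_LEN (0x24) apart. axi_start() writes 1 to these offsets
+ * to enable the selected write masters.
+ */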
+static const uint32_t vfe40_AXI_WM_CFG[] = {
+	0x0000006C,
+	0x00000090,
+	0x000000B4,
+	0x000000D8,
+	0x000000FC,
+	0x00000120,
+	0x00000144,
+};
+
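+/*
+ * Command descriptor table, indexed by VFE_CMD_* id. Where present,
+ * .length and .offset describe the register block that a command's
+ * payload is copied into (see e.g. vfe40_config_axi(), which uses
+ * vfe40_cmd[VFE_CMD_AXI_OUT_CFG].offset and .length).
+ */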
+static struct vfe40_cmd_type vfe40_cmd[] = {
+/*0*/
+	{VFE_CMD_DUMMY_0},
+	{VFE_CMD_SET_CLK},
+	{VFE_CMD_RESET},
+	{VFE_CMD_START},
+	{VFE_CMD_TEST_GEN_START},
+/*5*/
+	{VFE_CMD_OPERATION_CFG, V40_OPERATION_CFG_LEN},
+	{VFE_CMD_AXI_OUT_CFG, V40_AXI_OUT_LEN, V40_AXI_OUT_OFF, 0xFF},
+	{VFE_CMD_CAMIF_CFG, V40_CAMIF_LEN, V40_CAMIF_OFF, 0xFF},
+	{VFE_CMD_AXI_INPUT_CFG},
+	{VFE_CMD_BLACK_LEVEL_CFG},
+/*10*/
+	{VFE_CMD_MESH_ROLL_OFF_CFG},
+	{VFE_CMD_DEMUX_CFG, V40_DEMUX_LEN, V40_DEMUX_OFF, 0xFF},
+	{VFE_CMD_FOV_CFG},
+	{VFE_CMD_MAIN_SCALER_CFG},
+	{VFE_CMD_WB_CFG, V40_WB_LEN, V40_WB_OFF, 0xFF},
+/*15*/
+	{VFE_CMD_COLOR_COR_CFG, V40_COLOR_COR_LEN, V40_COLOR_COR_OFF, 0xFF},
+	{VFE_CMD_RGB_G_CFG, V40_RGB_G_LEN, V40_RGB_G_OFF, 0xFF},
+	{VFE_CMD_LA_CFG, V40_LA_LEN, V40_LA_OFF, 0xFF },
+	{VFE_CMD_CHROMA_EN_CFG, V40_CHROMA_EN_LEN, V40_CHROMA_EN_OFF, 0xFF},
+	{VFE_CMD_CHROMA_SUP_CFG, V40_CHROMA_SUP_LEN, V40_CHROMA_SUP_OFF, 0xFF},
+/*20*/
+	{VFE_CMD_MCE_CFG, V40_MCE_LEN, V40_MCE_OFF, 0xFF},
+	{VFE_CMD_SK_ENHAN_CFG, V40_SCE_LEN, V40_SCE_OFF, 0xFF},
+	{VFE_CMD_ASF_CFG, V40_ASF_LEN, V40_ASF_OFF, 0xFF},
+	{VFE_CMD_S2Y_CFG},
+	{VFE_CMD_S2CbCr_CFG},
+/*25*/
+	{VFE_CMD_CHROMA_SUBS_CFG},
+	{VFE_CMD_OUT_CLAMP_CFG, V40_OUT_CLAMP_LEN, V40_OUT_CLAMP_OFF, 0xFF},
+	{VFE_CMD_FRAME_SKIP_CFG},
+	{VFE_CMD_DUMMY_1},
+	{VFE_CMD_DUMMY_2},
+/*30*/
+	{VFE_CMD_DUMMY_3},
+	{VFE_CMD_UPDATE},
+	{VFE_CMD_BL_LVL_UPDATE},
+	{VFE_CMD_DEMUX_UPDATE, V40_DEMUX_LEN, V40_DEMUX_OFF, 0xFF},
+	{VFE_CMD_FOV_UPDATE},
+/*35*/
+	{VFE_CMD_MAIN_SCALER_UPDATE},
+	{VFE_CMD_WB_UPDATE, V40_WB_LEN, V40_WB_OFF, 0xFF},
+	{VFE_CMD_COLOR_COR_UPDATE, V40_COLOR_COR_LEN, V40_COLOR_COR_OFF, 0xFF},
+	{VFE_CMD_RGB_G_UPDATE, V40_RGB_G_LEN, V40_CHROMA_EN_OFF, 0xFF},
+	{VFE_CMD_LA_UPDATE, V40_LA_LEN, V40_LA_OFF, 0xFF },
+/*40*/
+	{VFE_CMD_CHROMA_EN_UPDATE, V40_CHROMA_EN_LEN, V40_CHROMA_EN_OFF, 0xFF},
+	{VFE_CMD_CHROMA_SUP_UPDATE, V40_CHROMA_SUP_LEN,
+		V40_CHROMA_SUP_OFF, 0xFF},
+	{VFE_CMD_MCE_UPDATE, V40_MCE_LEN, V40_MCE_OFF, 0xFF},
+	{VFE_CMD_SK_ENHAN_UPDATE, V40_SCE_LEN, V40_SCE_OFF, 0xFF},
+	{VFE_CMD_S2CbCr_UPDATE},
+/*45*/
+	{VFE_CMD_S2Y_UPDATE},
+	{VFE_CMD_ASF_UPDATE, V40_ASF_UPDATE_LEN, V40_ASF_OFF, 0xFF},
+	{VFE_CMD_FRAME_SKIP_UPDATE},
+	{VFE_CMD_CAMIF_FRAME_UPDATE},
+	{VFE_CMD_STATS_AF_UPDATE},
+/*50*/
+	{VFE_CMD_STATS_AE_UPDATE},
+	{VFE_CMD_STATS_AWB_UPDATE, V40_STATS_AWB_LEN, V40_STATS_AWB_OFF},
+	{VFE_CMD_STATS_RS_UPDATE, V40_STATS_RS_LEN, V40_STATS_RS_OFF},
+	{VFE_CMD_STATS_CS_UPDATE, V40_STATS_CS_LEN, V40_STATS_CS_OFF},
+	{VFE_CMD_STATS_SKIN_UPDATE},
+/*55*/
+	{VFE_CMD_STATS_IHIST_UPDATE, V40_STATS_IHIST_LEN, V40_STATS_IHIST_OFF},
+	{VFE_CMD_DUMMY_4},
+	{VFE_CMD_EPOCH1_ACK},
+	{VFE_CMD_EPOCH2_ACK},
+	{VFE_CMD_START_RECORDING},
+/*60*/
+	{VFE_CMD_STOP_RECORDING},
+	{VFE_CMD_DUMMY_5},
+	{VFE_CMD_DUMMY_6},
+	{VFE_CMD_CAPTURE, V40_CAPTURE_LEN, 0xFF},
+	{VFE_CMD_DUMMY_7},
+/*65*/
+	{VFE_CMD_STOP},
+	{VFE_CMD_GET_HW_VERSION, V40_GET_HW_VERSION_LEN,
+		V40_GET_HW_VERSION_OFF},
+	{VFE_CMD_GET_FRAME_SKIP_COUNTS},
+	{VFE_CMD_OUTPUT1_BUFFER_ENQ},
+	{VFE_CMD_OUTPUT2_BUFFER_ENQ},
+/*70*/
+	{VFE_CMD_OUTPUT3_BUFFER_ENQ},
+	{VFE_CMD_JPEG_OUT_BUF_ENQ},
+	{VFE_CMD_RAW_OUT_BUF_ENQ},
+	{VFE_CMD_RAW_IN_BUF_ENQ},
+	{VFE_CMD_STATS_AF_ENQ},
+/*75*/
+	{VFE_CMD_STATS_AE_ENQ},
+	{VFE_CMD_STATS_AWB_ENQ},
+	{VFE_CMD_STATS_RS_ENQ},
+	{VFE_CMD_STATS_CS_ENQ},
+	{VFE_CMD_STATS_SKIN_ENQ},
+/*80*/
+	{VFE_CMD_STATS_IHIST_ENQ},
+	{VFE_CMD_DUMMY_8},
+	{VFE_CMD_JPEG_ENC_CFG},
+	{VFE_CMD_DUMMY_9},
+	{VFE_CMD_STATS_AF_START},
+/*85*/
+	{VFE_CMD_STATS_AF_STOP},
+	{VFE_CMD_STATS_AE_START},
+	{VFE_CMD_STATS_AE_STOP},
+	{VFE_CMD_STATS_AWB_START, V40_STATS_AWB_LEN, V40_STATS_AWB_OFF},
+	{VFE_CMD_STATS_AWB_STOP},
+/*90*/
+	{VFE_CMD_STATS_RS_START, V40_STATS_RS_LEN, V40_STATS_RS_OFF},
+	{VFE_CMD_STATS_RS_STOP},
+	{VFE_CMD_STATS_CS_START, V40_STATS_CS_LEN, V40_STATS_CS_OFF},
+	{VFE_CMD_STATS_CS_STOP},
+	{VFE_CMD_STATS_SKIN_START},
+/*95*/
+	{VFE_CMD_STATS_SKIN_STOP},
+	{VFE_CMD_STATS_IHIST_START, V40_STATS_IHIST_LEN, V40_STATS_IHIST_OFF},
+	{VFE_CMD_STATS_IHIST_STOP},
+	{VFE_CMD_DUMMY_10},
+	{VFE_CMD_SYNC_TIMER_SETTING, V40_SYNC_TIMER_LEN, V40_SYNC_TIMER_OFF},
+/*100*/
+	{VFE_CMD_ASYNC_TIMER_SETTING, V40_ASYNC_TIMER_LEN, V40_ASYNC_TIMER_OFF},
+	{VFE_CMD_LIVESHOT},
+	{VFE_CMD_LA_SETUP},
+	{VFE_CMD_LINEARIZATION_CFG, V40_LINEARIZATION_LEN1,
+		V40_LINEARIZATION_OFF1},
+	{VFE_CMD_DEMOSAICV3},
+/*105*/
+	{VFE_CMD_DEMOSAICV3_ABCC_CFG},
+	{VFE_CMD_DEMOSAICV3_DBCC_CFG, V40_DEMOSAICV3_DBCC_LEN,
+		V40_DEMOSAICV3_DBCC_OFF},
+	{VFE_CMD_DEMOSAICV3_DBPC_CFG},
+	{VFE_CMD_DEMOSAICV3_ABF_CFG, V40_DEMOSAICV3_ABF_LEN,
+		V40_DEMOSAICV3_ABF_OFF},
+	{VFE_CMD_DEMOSAICV3_ABCC_UPDATE},
+/*110*/
+	{VFE_CMD_DEMOSAICV3_DBCC_UPDATE, V40_DEMOSAICV3_DBCC_LEN,
+		V40_DEMOSAICV3_DBCC_OFF},
+	{VFE_CMD_DEMOSAICV3_DBPC_UPDATE},
+	{VFE_CMD_XBAR_CFG},
+	{VFE_CMD_MODULE_CFG, V40_MODULE_CFG_LEN, V40_MODULE_CFG_OFF},
+	{VFE_CMD_ZSL},
+/*115*/
+	{VFE_CMD_LINEARIZATION_UPDATE, V40_LINEARIZATION_LEN1,
+		V40_LINEARIZATION_OFF1},
+	{VFE_CMD_DEMOSAICV3_ABF_UPDATE, V40_DEMOSAICV3_ABF_LEN,
+		V40_DEMOSAICV3_ABF_OFF},
+	{VFE_CMD_CLF_CFG, V40_CLF_CFG_LEN, V40_CLF_CFG_OFF},
+	{VFE_CMD_CLF_LUMA_UPDATE, V40_CLF_LUMA_UPDATE_LEN,
+		V40_CLF_LUMA_UPDATE_OFF},
+	{VFE_CMD_CLF_CHROMA_UPDATE, V40_CLF_CHROMA_UPDATE_LEN,
+		V40_CLF_CHROMA_UPDATE_OFF},
+/*120*/
+	{VFE_CMD_PCA_ROLL_OFF_CFG},
+	{VFE_CMD_PCA_ROLL_OFF_UPDATE},
+	{VFE_CMD_GET_REG_DUMP},
+	{VFE_CMD_GET_LINEARIZATON_TABLE},
+	{VFE_CMD_GET_MESH_ROLLOFF_TABLE},
+/*125*/
+	{VFE_CMD_GET_PCA_ROLLOFF_TABLE},
+	{VFE_CMD_GET_RGB_G_TABLE},
+	{VFE_CMD_GET_LA_TABLE},
+	{VFE_CMD_DEMOSAICV3_UPDATE},
+	{VFE_CMD_ACTIVE_REGION_CFG},
+/*130*/
+	{VFE_CMD_COLOR_PROCESSING_CONFIG},
+	{VFE_CMD_STATS_WB_AEC_CONFIG},
+	{VFE_CMD_STATS_WB_AEC_UPDATE},
+	{VFE_CMD_Y_GAMMA_CONFIG},
+	{VFE_CMD_SCALE_OUTPUT1_CONFIG},
+/*135*/
+	{VFE_CMD_SCALE_OUTPUT2_CONFIG},
+	{VFE_CMD_CAPTURE_RAW},
+	{VFE_CMD_STOP_LIVESHOT},
+	{VFE_CMD_RECONFIG_VFE},
+	{VFE_CMD_STATS_REQBUF},
+/*140*/
+	{VFE_CMD_STATS_ENQUEUEBUF},
+	{VFE_CMD_STATS_FLUSH_BUFQ},
+	{VFE_CMD_FOV_ENC_CFG, V40_FOV_ENC_LEN, V40_FOV_ENC_OFF, 0xFF},
+	{VFE_CMD_FOV_VIEW_CFG, V40_FOV_VIEW_LEN, V40_FOV_VIEW_OFF, 0xFF},
+	{VFE_CMD_FOV_ENC_UPDATE, V40_FOV_ENC_LEN, V40_FOV_ENC_OFF, 0xFF},
+/*145*/
+	{VFE_CMD_FOV_VIEW_UPDATE, V40_FOV_VIEW_LEN, V40_FOV_VIEW_OFF, 0xFF},
+	{VFE_CMD_SCALER_ENC_CFG, V40_SCALER_ENC_LEN, V40_SCALER_ENC_OFF, 0xFF},
+	{VFE_CMD_SCALER_VIEW_CFG, V40_SCALER_VIEW_LEN,
+		V40_SCALER_VIEW_OFF, 0xFF},
+	{VFE_CMD_SCALER_ENC_UPDATE, V40_SCALER_ENC_LEN,
+		V40_SCALER_ENC_OFF, 0xFF},
+	{VFE_CMD_SCALER_VIEW_UPDATE, V40_SCALER_VIEW_LEN,
+		V40_SCALER_VIEW_OFF, 0xFF},
+/*150*/
+	{VFE_CMD_COLORXFORM_ENC_CFG, V40_COLORXFORM_ENC_CFG_LEN,
+		V40_COLORXFORM_ENC_CFG_OFF, 0xFF},
+	{VFE_CMD_COLORXFORM_VIEW_CFG, V40_COLORXFORM_VIEW_CFG_LEN,
+		V40_COLORXFORM_VIEW_CFG_OFF},
+	{VFE_CMD_COLORXFORM_ENC_UPDATE, V40_COLORXFORM_ENC_CFG_LEN,
+		V40_COLORXFORM_ENC_CFG_OFF, 0xFF},
+	{VFE_CMD_COLORXFORM_VIEW_UPDATE, V40_COLORXFORM_VIEW_CFG_LEN,
+		V40_COLORXFORM_VIEW_CFG_OFF, 0xFF},
+};
+
+#endif /* __MSM_VFE40_H__ */
diff --git a/drivers/media/video/msm/vfe/msm_vfe40_axi.c b/drivers/media/video/msm/vfe/msm_vfe40_axi.c
new file mode 100644
index 0000000..35d5207
--- /dev/null
+++ b/drivers/media/video/msm/vfe/msm_vfe40_axi.c
@@ -0,0 +1,812 @@
+/* Copyright (c) 2012, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/uaccess.h>
+#include <linux/interrupt.h>
+#include <linux/slab.h>
+#include <linux/io.h>
+#include <linux/atomic.h>
+#include <linux/regulator/consumer.h>
+#include <linux/clk.h>
+#include <mach/irqs.h>
+#include <mach/camera.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-subdev.h>
+#include <media/msm_isp.h>
+
+#include "msm.h"
+#include "msm_vfe40.h"
+
+static int msm_axi_subdev_s_crystal_freq(struct v4l2_subdev *sd,
+						u32 freq, u32 flags)
+{
+	int rc = 0;
+	int round_rate;
+	struct axi_ctrl_t *axi_ctrl = v4l2_get_subdevdata(sd);
+
+	round_rate = clk_round_rate(axi_ctrl->vfe_clk[0], freq);
+	if (round_rate < 0) {
+		pr_err("%s: clk_round_rate failed %d\n",
+					__func__, round_rate);
+		return round_rate;
+	}
+
+	axi_ctrl->share_ctrl->vfe_clk_rate = round_rate;
+	rc = clk_set_rate(axi_ctrl->vfe_clk[0], round_rate);
+	if (rc < 0)
+		pr_err("%s: clk_set_rate failed %d\n",
+					__func__, rc);
+
+	return rc;
+}
+
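+/*
+ * Enable the AXI write masters for the currently configured output
+ * path by writing 1 into each selected channel's config register
+ * (vfe40_AXI_WM_CFG[]). Which channels are enabled depends on the
+ * operation mode and the PRIMARY/SECONDARY output_mode bits.
+ */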
+void axi_start(struct axi_ctrl_t *axi_ctrl)
+{
+	switch (axi_ctrl->share_ctrl->operation_mode) {
+	case VFE_OUTPUTS_PREVIEW:
+	case VFE_OUTPUTS_PREVIEW_AND_VIDEO:
+		if (axi_ctrl->share_ctrl->outpath.output_mode &
+			VFE40_OUTPUT_MODE_PRIMARY) {
+			msm_camera_io_w(1, axi_ctrl->share_ctrl->vfebase +
+				vfe40_AXI_WM_CFG[axi_ctrl->
+				share_ctrl->outpath.out0.ch0]);
+			msm_camera_io_w(1, axi_ctrl->share_ctrl->vfebase +
+				vfe40_AXI_WM_CFG[axi_ctrl->
+				share_ctrl->outpath.out0.ch1]);
+		} else if (axi_ctrl->share_ctrl->outpath.output_mode &
+				VFE40_OUTPUT_MODE_PRIMARY_ALL_CHNLS) {
+			msm_camera_io_w(1, axi_ctrl->share_ctrl->vfebase +
+				vfe40_AXI_WM_CFG[axi_ctrl->
+				share_ctrl->outpath.out0.ch0]);
+			msm_camera_io_w(1, axi_ctrl->share_ctrl->vfebase +
+				vfe40_AXI_WM_CFG[axi_ctrl->
+				share_ctrl->outpath.out0.ch1]);
+			msm_camera_io_w(1, axi_ctrl->share_ctrl->vfebase +
+				vfe40_AXI_WM_CFG[axi_ctrl->
+				share_ctrl->outpath.out0.ch2]);
+		}
+		break;
+	default:
+		if (axi_ctrl->share_ctrl->outpath.output_mode &
+			VFE40_OUTPUT_MODE_SECONDARY) {
+			msm_camera_io_w(1, axi_ctrl->share_ctrl->vfebase +
+				vfe40_AXI_WM_CFG[axi_ctrl->
+				share_ctrl->outpath.out1.ch0]);
+			msm_camera_io_w(1, axi_ctrl->share_ctrl->vfebase +
+				vfe40_AXI_WM_CFG[axi_ctrl->
+				share_ctrl->outpath.out1.ch1]);
+		} else if (axi_ctrl->share_ctrl->outpath.output_mode &
+			VFE40_OUTPUT_MODE_SECONDARY_ALL_CHNLS) {
+			msm_camera_io_w(1, axi_ctrl->share_ctrl->vfebase +
+				vfe40_AXI_WM_CFG[axi_ctrl->
+				share_ctrl->outpath.out1.ch0]);
+			msm_camera_io_w(1, axi_ctrl->share_ctrl->vfebase +
+				vfe40_AXI_WM_CFG[axi_ctrl->
+				share_ctrl->outpath.out1.ch1]);
+			msm_camera_io_w(1, axi_ctrl->share_ctrl->vfebase +
+				vfe40_AXI_WM_CFG[axi_ctrl->
+				share_ctrl->outpath.out1.ch2]);
+		}
+		break;
+	}
+}
+
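+/*
+ * AXI halt sequence: request AXI_HALT, busy-wait on bit 0 of
+ * VFE_AXI_STATUS for the halt ack, clear the halt bit, mask the irqs
+ * down to the stop/reset related ones, then issue the global reset
+ * with VFE_RESET_UPON_STOP_CMD.
+ */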
+void axi_stop(struct axi_ctrl_t *axi_ctrl)
+{
+	uint8_t  axiBusyFlag = true;
+	/* axi halt command. */
+	msm_camera_io_w(AXI_HALT,
+		axi_ctrl->share_ctrl->vfebase + VFE_AXI_CMD);
+	wmb();
+	while (axiBusyFlag) {
+		if (msm_camera_io_r(
+			axi_ctrl->share_ctrl->vfebase + VFE_AXI_STATUS) & 0x1)
+			axiBusyFlag = false;
+	}
+	/* Ensure the write order while writing
+	to the command register using the barrier */
+	msm_camera_io_w_mb(AXI_HALT_CLEAR,
+		axi_ctrl->share_ctrl->vfebase + VFE_AXI_CMD);
+
+	/* after axi halt, then ok to apply global reset. */
+	/* enable reset_ack and async timer interrupt only while
+	stopping the pipeline.*/
+	msm_camera_io_w(0xf0000000,
+		axi_ctrl->share_ctrl->vfebase + VFE_IRQ_MASK_0);
+	msm_camera_io_w(VFE_IMASK_WHILE_STOPPING_1,
+		axi_ctrl->share_ctrl->vfebase + VFE_IRQ_MASK_1);
+
+	/* Ensure the write order while writing
+	to the command register using the barrier */
+	msm_camera_io_w_mb(VFE_RESET_UPON_STOP_CMD,
+		axi_ctrl->share_ctrl->vfebase + VFE_GLOBAL_RESET);
+}
+
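+/*
+ * The channel-info words that follow the V40_AXI_CFG_LEN config words
+ * pack two 16-bit fields per 32-bit word; the unpacking below reads
+ * them as (illustration of the layout, not additional code):
+ *
+ *	word0: out0.ch0 | (out0.ch1 << 16)
+ *	word1: out0.ch2 | (out0.image_mode << 16)
+ *	word2: out1.ch0 | (out1.ch1 << 16)
+ *	...
+ */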
+static int vfe40_config_axi(
+	struct axi_ctrl_t *axi_ctrl, int mode, uint32_t *ao)
+{
+	uint32_t *ch_info;
+	uint32_t *axi_cfg = ao;
+
+	/* Update the corresponding write masters for each output*/
+	ch_info = axi_cfg + V40_AXI_CFG_LEN;
+	axi_ctrl->share_ctrl->outpath.out0.ch0 = 0x0000FFFF & *ch_info;
+	axi_ctrl->share_ctrl->outpath.out0.ch1 =
+		0x0000FFFF & (*ch_info++ >> 16);
+	axi_ctrl->share_ctrl->outpath.out0.ch2 = 0x0000FFFF & *ch_info;
+	axi_ctrl->share_ctrl->outpath.out0.image_mode =
+		0x0000FFFF & (*ch_info++ >> 16);
+	axi_ctrl->share_ctrl->outpath.out1.ch0 = 0x0000FFFF & *ch_info;
+	axi_ctrl->share_ctrl->outpath.out1.ch1 =
+		0x0000FFFF & (*ch_info++ >> 16);
+	axi_ctrl->share_ctrl->outpath.out1.ch2 = 0x0000FFFF & *ch_info;
+	axi_ctrl->share_ctrl->outpath.out1.image_mode =
+		0x0000FFFF & (*ch_info++ >> 16);
+	axi_ctrl->share_ctrl->outpath.out2.ch0 = 0x0000FFFF & *ch_info;
+	axi_ctrl->share_ctrl->outpath.out2.ch1 =
+		0x0000FFFF & (*ch_info++ >> 16);
+	axi_ctrl->share_ctrl->outpath.out2.ch2 = 0x0000FFFF & *ch_info++;
+
+	switch (mode) {
+	case OUTPUT_PRIM:
+		axi_ctrl->share_ctrl->outpath.output_mode =
+			VFE40_OUTPUT_MODE_PRIMARY;
+		break;
+	case OUTPUT_PRIM_ALL_CHNLS:
+		axi_ctrl->share_ctrl->outpath.output_mode =
+			VFE40_OUTPUT_MODE_PRIMARY_ALL_CHNLS;
+		break;
+	case OUTPUT_PRIM|OUTPUT_SEC:
+		axi_ctrl->share_ctrl->outpath.output_mode =
+			VFE40_OUTPUT_MODE_PRIMARY;
+		axi_ctrl->share_ctrl->outpath.output_mode |=
+			VFE40_OUTPUT_MODE_SECONDARY;
+		break;
+	case OUTPUT_PRIM|OUTPUT_SEC_ALL_CHNLS:
+		axi_ctrl->share_ctrl->outpath.output_mode =
+			VFE40_OUTPUT_MODE_PRIMARY;
+		axi_ctrl->share_ctrl->outpath.output_mode |=
+			VFE40_OUTPUT_MODE_SECONDARY_ALL_CHNLS;
+		break;
+	case OUTPUT_PRIM_ALL_CHNLS|OUTPUT_SEC:
+		axi_ctrl->share_ctrl->outpath.output_mode =
+			VFE40_OUTPUT_MODE_PRIMARY_ALL_CHNLS;
+		axi_ctrl->share_ctrl->outpath.output_mode |=
+			VFE40_OUTPUT_MODE_SECONDARY;
+		break;
+	default:
+		pr_err("%s Invalid AXI mode %d ", __func__, mode);
+		return -EINVAL;
+	}
+	msm_camera_io_w(*ao, axi_ctrl->share_ctrl->vfebase +
+		VFE_BUS_IO_FORMAT_CFG);
+	msm_camera_io_memcpy(axi_ctrl->share_ctrl->vfebase +
+		vfe40_cmd[VFE_CMD_AXI_OUT_CFG].offset, axi_cfg,
+		vfe40_cmd[VFE_CMD_AXI_OUT_CFG].length - V40_AXI_CH_INF_LEN);
+	return 0;
+}
+
+static int msm_axi_config(struct v4l2_subdev *sd, void __user *arg)
+{
+	struct msm_vfe_cfg_cmd cfgcmd;
+	struct msm_isp_cmd vfecmd;
+	int rc = 0;
+	struct axi_ctrl_t *axi_ctrl = v4l2_get_subdevdata(sd);
+
+	if (!axi_ctrl->share_ctrl->vfebase) {
+		pr_err("%s: base address unmapped\n", __func__);
+		return -EFAULT;
+	}
+	if (NULL != arg) {
+		if (copy_from_user(&cfgcmd, arg, sizeof(cfgcmd))) {
+			ERR_COPY_FROM_USER();
+			return -EFAULT;
+		}
+	}
+	if (NULL != cfgcmd.value) {
+		if (copy_from_user(&vfecmd,
+				(void __user *)(cfgcmd.value),
+				sizeof(vfecmd))) {
+			pr_err("%s %d: copy_from_user failed\n", __func__,
+				__LINE__);
+			return -EFAULT;
+		}
+	}
+
+	switch (cfgcmd.cmd_type) {
+	case CMD_AXI_CFG_PRIM: {
+		uint32_t *axio = NULL;
+		axio = kmalloc(vfe40_cmd[VFE_CMD_AXI_OUT_CFG].length,
+				GFP_ATOMIC);
+		if (!axio) {
+			rc = -ENOMEM;
+			break;
+		}
+
+		if (copy_from_user(axio, (void __user *)(vfecmd.value),
+				vfe40_cmd[VFE_CMD_AXI_OUT_CFG].length)) {
+			kfree(axio);
+			rc = -EFAULT;
+			break;
+		}
+		vfe40_config_axi(axi_ctrl, OUTPUT_PRIM, axio);
+		kfree(axio);
+	}
+		break;
+	case CMD_AXI_CFG_PRIM_ALL_CHNLS: {
+		uint32_t *axio = NULL;
+		axio = kmalloc(vfe40_cmd[VFE_CMD_AXI_OUT_CFG].length,
+				GFP_ATOMIC);
+		if (!axio) {
+			rc = -ENOMEM;
+			break;
+		}
+
+		if (copy_from_user(axio, (void __user *)(vfecmd.value),
+				vfe40_cmd[VFE_CMD_AXI_OUT_CFG].length)) {
+			kfree(axio);
+			rc = -EFAULT;
+			break;
+		}
+		vfe40_config_axi(axi_ctrl, OUTPUT_PRIM_ALL_CHNLS, axio);
+		kfree(axio);
+	}
+		break;
+	case CMD_AXI_CFG_PRIM|CMD_AXI_CFG_SEC: {
+		uint32_t *axio = NULL;
+		axio = kmalloc(vfe40_cmd[VFE_CMD_AXI_OUT_CFG].length,
+				GFP_ATOMIC);
+		if (!axio) {
+			rc = -ENOMEM;
+			break;
+		}
+
+		if (copy_from_user(axio, (void __user *)(vfecmd.value),
+				vfe40_cmd[VFE_CMD_AXI_OUT_CFG].length)) {
+			kfree(axio);
+			rc = -EFAULT;
+			break;
+		}
+		vfe40_config_axi(axi_ctrl, OUTPUT_PRIM|OUTPUT_SEC, axio);
+		kfree(axio);
+	}
+		break;
+	case CMD_AXI_CFG_PRIM|CMD_AXI_CFG_SEC_ALL_CHNLS: {
+		uint32_t *axio = NULL;
+		axio = kmalloc(vfe40_cmd[VFE_CMD_AXI_OUT_CFG].length,
+				GFP_ATOMIC);
+		if (!axio) {
+			rc = -ENOMEM;
+			break;
+		}
+
+		if (copy_from_user(axio, (void __user *)(vfecmd.value),
+				vfe40_cmd[VFE_CMD_AXI_OUT_CFG].length)) {
+			kfree(axio);
+			rc = -EFAULT;
+			break;
+		}
+		vfe40_config_axi(axi_ctrl,
+			OUTPUT_PRIM|OUTPUT_SEC_ALL_CHNLS, axio);
+		kfree(axio);
+	}
+		break;
+	case CMD_AXI_CFG_PRIM_ALL_CHNLS|CMD_AXI_CFG_SEC: {
+		uint32_t *axio = NULL;
+		axio = kmalloc(vfe40_cmd[VFE_CMD_AXI_OUT_CFG].length,
+				GFP_ATOMIC);
+		if (!axio) {
+			rc = -ENOMEM;
+			break;
+		}
+
+		if (copy_from_user(axio, (void __user *)(vfecmd.value),
+				vfe40_cmd[VFE_CMD_AXI_OUT_CFG].length)) {
+			kfree(axio);
+			rc = -EFAULT;
+			break;
+		}
+		vfe40_config_axi(axi_ctrl,
+			OUTPUT_PRIM_ALL_CHNLS|OUTPUT_SEC, axio);
+		kfree(axio);
+	}
+		break;
+	case CMD_AXI_CFG_PRIM_ALL_CHNLS|CMD_AXI_CFG_SEC_ALL_CHNLS:
+		pr_err("%s Invalid/Unsupported AXI configuration %x",
+			__func__, cfgcmd.cmd_type);
+		break;
+	case CMD_AXI_START:
+		axi_start(axi_ctrl);
+		break;
+	case CMD_AXI_STOP:
+		axi_stop(axi_ctrl);
+		break;
+	default:
+		pr_err("%s Unsupported AXI configuration %x ", __func__,
+			cfgcmd.cmd_type);
+		break;
+	}
+	return rc;
+}
+
+static struct msm_free_buf *vfe40_check_free_buffer(
+	int id, int path, struct axi_ctrl_t *axi_ctrl)
+{
+	struct vfe40_output_ch *outch = NULL;
+	struct msm_free_buf *b = NULL;
+	uint32_t image_mode = 0;
+
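+	/* Ask the upper layer (via subdev notify) to queue a free buffer
+	 * for this path, and hand it back if one was provided.
+	 */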
+	if (path == VFE_MSG_OUTPUT_PRIMARY)
+		image_mode = axi_ctrl->share_ctrl->outpath.out0.image_mode;
+	else
+		image_mode = axi_ctrl->share_ctrl->outpath.out1.image_mode;
+
+	vfe40_subdev_notify(id, path, image_mode,
+		&axi_ctrl->subdev, axi_ctrl->share_ctrl);
+	outch = vfe40_get_ch(path, axi_ctrl->share_ctrl);
+	if (outch->free_buf.ch_paddr[0])
+		b = &outch->free_buf;
+	return b;
+}
+
+static void vfe_send_outmsg(
+	struct axi_ctrl_t *axi_ctrl, uint8_t msgid,
+	uint32_t ch0_paddr, uint32_t ch1_paddr,
+	uint32_t ch2_paddr, uint32_t image_mode)
+{
+	struct isp_msg_output msg;
+
+	msg.output_id = msgid;
+	msg.buf.image_mode = image_mode;
+	msg.buf.ch_paddr[0]	= ch0_paddr;
+	msg.buf.ch_paddr[1]	= ch1_paddr;
+	msg.buf.ch_paddr[2]	= ch2_paddr;
+	msg.frameCounter = axi_ctrl->share_ctrl->vfeFrameId;
+
+	v4l2_subdev_notify(&axi_ctrl->subdev,
+			NOTIFY_VFE_MSG_OUT,
+			&msg);
+	return;
+}
+
+static void vfe40_process_output_path_irq_0(
+	struct axi_ctrl_t *axi_ctrl)
+{
+	uint32_t ping_pong;
+	uint32_t ch0_paddr, ch1_paddr, ch2_paddr;
+	uint8_t out_bool = 0;
+	struct msm_free_buf *free_buf = NULL;
+
+	free_buf = vfe40_check_free_buffer(VFE_MSG_OUTPUT_IRQ,
+		VFE_MSG_OUTPUT_PRIMARY, axi_ctrl);
+
+	/* We render frames under the following conditions:
+	 * 1. Continuous mode and a free buffer is available.
+	 * 2. In snapshot mode, a free buffer is not always available;
+	 *    when the pending snapshot count is <= 1, there is no need
+	 *    to use a free buffer.
+	 */
+	out_bool = (
+		(axi_ctrl->share_ctrl->operation_mode ==
+			VFE_OUTPUTS_THUMB_AND_MAIN ||
+		axi_ctrl->share_ctrl->operation_mode ==
+			VFE_OUTPUTS_MAIN_AND_THUMB ||
+		axi_ctrl->share_ctrl->operation_mode ==
+			VFE_OUTPUTS_THUMB_AND_JPEG ||
+		axi_ctrl->share_ctrl->operation_mode ==
+			VFE_OUTPUTS_JPEG_AND_THUMB ||
+		axi_ctrl->share_ctrl->operation_mode ==
+			VFE_OUTPUTS_RAW ||
+		axi_ctrl->share_ctrl->liveshot_state ==
+			VFE_STATE_STARTED ||
+		axi_ctrl->share_ctrl->liveshot_state ==
+			VFE_STATE_STOP_REQUESTED ||
+		axi_ctrl->share_ctrl->liveshot_state ==
+			VFE_STATE_STOPPED) &&
+		(axi_ctrl->share_ctrl->vfe_capture_count <= 1)) ||
+			free_buf;
+
+	if (out_bool) {
+		ping_pong = msm_camera_io_r(axi_ctrl->share_ctrl->vfebase +
+			VFE_BUS_PING_PONG_STATUS);
+
+		/* Channel 0*/
+		ch0_paddr = vfe40_get_ch_addr(
+			ping_pong, axi_ctrl->share_ctrl->vfebase,
+			axi_ctrl->share_ctrl->outpath.out0.ch0);
+		/* Channel 1*/
+		ch1_paddr = vfe40_get_ch_addr(
+			ping_pong, axi_ctrl->share_ctrl->vfebase,
+			axi_ctrl->share_ctrl->outpath.out0.ch1);
+		/* Channel 2*/
+		ch2_paddr = vfe40_get_ch_addr(
+			ping_pong, axi_ctrl->share_ctrl->vfebase,
+			axi_ctrl->share_ctrl->outpath.out0.ch2);
+
+		CDBG("output path 0, ch0 = 0x%x, ch1 = 0x%x, ch2 = 0x%x\n",
+			ch0_paddr, ch1_paddr, ch2_paddr);
+		if (free_buf) {
+			/* Y channel */
+			vfe40_put_ch_addr(ping_pong,
+			axi_ctrl->share_ctrl->vfebase,
+			axi_ctrl->share_ctrl->outpath.out0.ch0,
+			free_buf->ch_paddr[0]);
+			/* Chroma channel */
+			vfe40_put_ch_addr(ping_pong,
+			axi_ctrl->share_ctrl->vfebase,
+			axi_ctrl->share_ctrl->outpath.out0.ch1,
+			free_buf->ch_paddr[1]);
+			if (free_buf->num_planes > 2)
+				vfe40_put_ch_addr(ping_pong,
+					axi_ctrl->share_ctrl->vfebase,
+					axi_ctrl->share_ctrl->outpath.out0.ch2,
+					free_buf->ch_paddr[2]);
+		}
+		if (axi_ctrl->share_ctrl->operation_mode ==
+				VFE_OUTPUTS_THUMB_AND_MAIN ||
+			axi_ctrl->share_ctrl->operation_mode ==
+				VFE_OUTPUTS_MAIN_AND_THUMB ||
+			axi_ctrl->share_ctrl->operation_mode ==
+				VFE_OUTPUTS_THUMB_AND_JPEG ||
+			axi_ctrl->share_ctrl->operation_mode ==
+				VFE_OUTPUTS_JPEG_AND_THUMB ||
+			axi_ctrl->share_ctrl->operation_mode ==
+				VFE_OUTPUTS_RAW ||
+			axi_ctrl->share_ctrl->liveshot_state ==
+				VFE_STATE_STOPPED)
+			axi_ctrl->share_ctrl->outpath.out0.capture_cnt--;
+
+		vfe_send_outmsg(axi_ctrl,
+			MSG_ID_OUTPUT_PRIMARY, ch0_paddr,
+			ch1_paddr, ch2_paddr,
+			axi_ctrl->share_ctrl->outpath.out0.image_mode);
+
+		if (axi_ctrl->share_ctrl->liveshot_state == VFE_STATE_STOPPED)
+			axi_ctrl->share_ctrl->liveshot_state = VFE_STATE_IDLE;
+
+	} else {
+		axi_ctrl->share_ctrl->outpath.out0.frame_drop_cnt++;
+		CDBG("path_irq_0 - no free buffer!\n");
+	}
+}
+
+static void vfe40_process_output_path_irq_1(
+	struct axi_ctrl_t *axi_ctrl)
+{
+	uint32_t ping_pong;
+	uint32_t ch0_paddr, ch1_paddr, ch2_paddr;
+	/* this must be snapshot main image output. */
+	uint8_t out_bool = 0;
+	struct msm_free_buf *free_buf = NULL;
+
+	free_buf = vfe40_check_free_buffer(VFE_MSG_OUTPUT_IRQ,
+		VFE_MSG_OUTPUT_SECONDARY, axi_ctrl);
+	out_bool = ((axi_ctrl->share_ctrl->operation_mode ==
+				VFE_OUTPUTS_THUMB_AND_MAIN ||
+			axi_ctrl->share_ctrl->operation_mode ==
+				VFE_OUTPUTS_MAIN_AND_THUMB ||
+			axi_ctrl->share_ctrl->operation_mode ==
+				VFE_OUTPUTS_RAW ||
+			axi_ctrl->share_ctrl->operation_mode ==
+				VFE_OUTPUTS_JPEG_AND_THUMB) &&
+			(axi_ctrl->share_ctrl->vfe_capture_count <= 1)) ||
+				free_buf;
+
+	if (out_bool) {
+		ping_pong = msm_camera_io_r(axi_ctrl->share_ctrl->vfebase +
+			VFE_BUS_PING_PONG_STATUS);
+
+		/* Y channel */
+		ch0_paddr = vfe40_get_ch_addr(ping_pong,
+			axi_ctrl->share_ctrl->vfebase,
+			axi_ctrl->share_ctrl->outpath.out1.ch0);
+		/* Chroma channel */
+		ch1_paddr = vfe40_get_ch_addr(ping_pong,
+			axi_ctrl->share_ctrl->vfebase,
+			axi_ctrl->share_ctrl->outpath.out1.ch1);
+		ch2_paddr = vfe40_get_ch_addr(ping_pong,
+			axi_ctrl->share_ctrl->vfebase,
+			axi_ctrl->share_ctrl->outpath.out1.ch2);
+
+		CDBG("%s ch0 = 0x%x, ch1 = 0x%x, ch2 = 0x%x\n",
+			__func__, ch0_paddr, ch1_paddr, ch2_paddr);
+		if (free_buf) {
+			/* Y channel */
+			vfe40_put_ch_addr(ping_pong,
+			axi_ctrl->share_ctrl->vfebase,
+			axi_ctrl->share_ctrl->outpath.out1.ch0,
+			free_buf->ch_paddr[0]);
+			/* Chroma channel */
+			vfe40_put_ch_addr(ping_pong,
+			axi_ctrl->share_ctrl->vfebase,
+			axi_ctrl->share_ctrl->outpath.out1.ch1,
+			free_buf->ch_paddr[1]);
+			if (free_buf->num_planes > 2)
+				vfe40_put_ch_addr(ping_pong,
+					axi_ctrl->share_ctrl->vfebase,
+					axi_ctrl->share_ctrl->outpath.out1.ch2,
+					free_buf->ch_paddr[2]);
+		}
+		if (axi_ctrl->share_ctrl->operation_mode ==
+				VFE_OUTPUTS_THUMB_AND_MAIN ||
+			axi_ctrl->share_ctrl->operation_mode ==
+				VFE_OUTPUTS_MAIN_AND_THUMB ||
+			axi_ctrl->share_ctrl->operation_mode ==
+				VFE_OUTPUTS_RAW ||
+			axi_ctrl->share_ctrl->operation_mode ==
+				VFE_OUTPUTS_JPEG_AND_THUMB)
+			axi_ctrl->share_ctrl->outpath.out1.capture_cnt--;
+
+		vfe_send_outmsg(axi_ctrl,
+			MSG_ID_OUTPUT_SECONDARY, ch0_paddr,
+			ch1_paddr, ch2_paddr,
+			axi_ctrl->share_ctrl->outpath.out1.image_mode);
+
+	} else {
+		axi_ctrl->share_ctrl->outpath.out1.frame_drop_cnt++;
+		CDBG("path_irq_1 - no free buffer!\n");
+	}
+}
+
+static void msm_axi_process_irq(struct v4l2_subdev *sd, void *arg)
+{
+	struct axi_ctrl_t *axi_ctrl = v4l2_get_subdevdata(sd);
+	uint32_t irqstatus = (uint32_t) arg;
+
+	if (!axi_ctrl->share_ctrl->vfebase) {
+		pr_err("%s: base address unmapped\n", __func__);
+		return;
+	}
+	/* next, check output path related interrupts. */
+	if (irqstatus &
+		VFE_IRQ_STATUS0_IMAGE_COMPOSIT_DONE0_MASK) {
+		CDBG("Image composite done 0 irq occured.\n");
+		vfe40_process_output_path_irq_0(axi_ctrl);
+	}
+	if (irqstatus &
+		VFE_IRQ_STATUS0_IMAGE_COMPOSIT_DONE1_MASK) {
+		CDBG("Image composite done 1 irq occured.\n");
+		vfe40_process_output_path_irq_1(axi_ctrl);
+	}
+	/* In snapshot mode, once both output paths have finished their
+	 * captures, stop CAMIF and send the snapshot-done message.
+	 */
+	if (axi_ctrl->share_ctrl->operation_mode ==
+			VFE_OUTPUTS_THUMB_AND_MAIN ||
+		axi_ctrl->share_ctrl->operation_mode ==
+			VFE_OUTPUTS_MAIN_AND_THUMB ||
+		axi_ctrl->share_ctrl->operation_mode ==
+			VFE_OUTPUTS_THUMB_AND_JPEG ||
+		axi_ctrl->share_ctrl->operation_mode ==
+			VFE_OUTPUTS_JPEG_AND_THUMB ||
+		axi_ctrl->share_ctrl->operation_mode ==
+			VFE_OUTPUTS_RAW) {
+		if ((axi_ctrl->share_ctrl->outpath.out0.capture_cnt == 0)
+				&& (axi_ctrl->share_ctrl->outpath.out1.
+				capture_cnt == 0)) {
+			msm_camera_io_w_mb(
+				CAMIF_COMMAND_STOP_IMMEDIATELY,
+				axi_ctrl->share_ctrl->vfebase +
+				VFE_CAMIF_COMMAND);
+			vfe40_send_isp_msg(&axi_ctrl->subdev,
+				axi_ctrl->share_ctrl->vfeFrameId,
+				MSG_ID_SNAPSHOT_DONE);
+		}
+	}
+}
+
+static int msm_axi_buf_cfg(struct v4l2_subdev *sd, void __user *arg)
+{
+	struct msm_camvfe_params *vfe_params =
+		(struct msm_camvfe_params *)arg;
+	struct msm_vfe_cfg_cmd *cmd = vfe_params->vfe_cfg;
+	struct axi_ctrl_t *axi_ctrl = v4l2_get_subdevdata(sd);
+	void *data = vfe_params->data;
+	int rc = 0;
+
+	if (!axi_ctrl->share_ctrl->vfebase) {
+		pr_err("%s: base address unmapped\n", __func__);
+		return -EFAULT;
+	}
+
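+	/* Store the ping/pong and free buffers for the selected output
+	 * path; the free buffer is picked up later by the output-path
+	 * IRQ handlers.
+	 */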
+	switch (cmd->cmd_type) {
+	case CMD_CONFIG_PING_ADDR: {
+		int path = *((int *)cmd->value);
+		struct vfe40_output_ch *outch =
+			vfe40_get_ch(path, axi_ctrl->share_ctrl);
+		outch->ping = *((struct msm_free_buf *)data);
+	}
+		break;
+
+	case CMD_CONFIG_PONG_ADDR: {
+		int path = *((int *)cmd->value);
+		struct vfe40_output_ch *outch =
+			vfe40_get_ch(path, axi_ctrl->share_ctrl);
+		outch->pong = *((struct msm_free_buf *)data);
+	}
+		break;
+
+	case CMD_CONFIG_FREE_BUF_ADDR: {
+		int path = *((int *)cmd->value);
+		struct vfe40_output_ch *outch =
+			vfe40_get_ch(path, axi_ctrl->share_ctrl);
+		outch->free_buf = *((struct msm_free_buf *)data);
+	}
+		break;
+	default:
+		pr_err("%s Unsupported AXI Buf config %x ", __func__,
+			cmd->cmd_type);
+	}
+	return rc;
+}
+
+static struct msm_cam_clk_info vfe40_clk_info[] = {
+	{"vfe_clk_src", 266670000},
+	{"camss_vfe_vfe_clk", -1},
+	{"camss_csi_vfe_clk", -1},
+	{"top_clk", -1},
+	{"iface_clk", -1},
+	{"bus_clk", -1},
+};
+
+int msm_axi_subdev_init(struct v4l2_subdev *sd,
+			struct msm_cam_media_controller *mctl)
+{
+	int rc = 0;
+	struct axi_ctrl_t *axi_ctrl = v4l2_get_subdevdata(sd);
+	v4l2_set_subdev_hostdata(sd, mctl);
+	spin_lock_init(&axi_ctrl->tasklet_lock);
+	INIT_LIST_HEAD(&axi_ctrl->tasklet_q);
+	spin_lock_init(&axi_ctrl->share_ctrl->sd_notify_lock);
+
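+	/* Map the VFE register block, bring up the VFE rail, clocks and
+	 * bus bandwidth vote, and only then unmask the VFE interrupt.
+	 */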
+	axi_ctrl->share_ctrl->vfebase = ioremap(axi_ctrl->vfemem->start,
+		resource_size(axi_ctrl->vfemem));
+	if (!axi_ctrl->share_ctrl->vfebase) {
+		rc = -ENOMEM;
+		pr_err("%s: vfe ioremap failed\n", __func__);
+		goto remap_failed;
+	}
+
+	if (axi_ctrl->fs_vfe == NULL) {
+		axi_ctrl->fs_vfe =
+			regulator_get(&axi_ctrl->pdev->dev, "vdd");
+		if (IS_ERR(axi_ctrl->fs_vfe)) {
+			pr_err("%s: Regulator FS_VFE get failed %ld\n",
+				__func__, PTR_ERR(axi_ctrl->fs_vfe));
+			rc = PTR_ERR(axi_ctrl->fs_vfe);
+			axi_ctrl->fs_vfe = NULL;
+			goto fs_failed;
+		} else if (regulator_enable(axi_ctrl->fs_vfe)) {
+			pr_err("%s: Regulator FS_VFE enable failed\n",
+							__func__);
+			regulator_put(axi_ctrl->fs_vfe);
+			axi_ctrl->fs_vfe = NULL;
+			rc = -ENODEV;
+			goto fs_failed;
+		}
+	}
+	rc = msm_cam_clk_enable(&axi_ctrl->pdev->dev, vfe40_clk_info,
+			axi_ctrl->vfe_clk, ARRAY_SIZE(vfe40_clk_info), 1);
+	if (rc < 0)
+			goto clk_enable_failed;
+
+	msm_camio_bus_scale_cfg(
+		mctl->sdata->pdata->cam_bus_scale_table, S_INIT);
+	msm_camio_bus_scale_cfg(
+		mctl->sdata->pdata->cam_bus_scale_table, S_PREVIEW);
+
+	axi_ctrl->share_ctrl->register_total = VFE40_REGISTER_TOTAL;
+
+	enable_irq(axi_ctrl->vfeirq->start);
+
+	return rc;
+clk_enable_failed:
+	regulator_disable(axi_ctrl->fs_vfe);
+	regulator_put(axi_ctrl->fs_vfe);
+	axi_ctrl->fs_vfe = NULL;
+fs_failed:
+	iounmap(axi_ctrl->share_ctrl->vfebase);
+	axi_ctrl->share_ctrl->vfebase = NULL;
+remap_failed:
+	disable_irq(axi_ctrl->vfeirq->start);
+	return rc;
+}
+
+void msm_axi_subdev_release(struct v4l2_subdev *sd)
+{
+	struct msm_cam_media_controller *pmctl =
+		(struct msm_cam_media_controller *)v4l2_get_subdev_hostdata(sd);
+	struct axi_ctrl_t *axi_ctrl = v4l2_get_subdevdata(sd);
+	if (!axi_ctrl->share_ctrl->vfebase) {
+		pr_err("%s: base address unmapped\n", __func__);
+		return;
+	}
+
+	CDBG("%s, free_irq\n", __func__);
+	disable_irq(axi_ctrl->vfeirq->start);
+	tasklet_kill(&axi_ctrl->vfe40_tasklet);
+	msm_cam_clk_enable(&axi_ctrl->pdev->dev, vfe40_clk_info,
+		axi_ctrl->vfe_clk, ARRAY_SIZE(vfe40_clk_info), 0);
+
+	if (axi_ctrl->fs_vfe) {
+		regulator_disable(axi_ctrl->fs_vfe);
+		regulator_put(axi_ctrl->fs_vfe);
+		axi_ctrl->fs_vfe = NULL;
+	}
+	iounmap(axi_ctrl->share_ctrl->vfebase);
+	axi_ctrl->share_ctrl->vfebase = NULL;
+
+	if (atomic_read(&axi_ctrl->share_ctrl->irq_cnt))
+		pr_warning("%s, Warning IRQ Count not ZERO\n", __func__);
+
+	msm_camio_bus_scale_cfg(
+		pmctl->sdata->pdata->cam_bus_scale_table, S_EXIT);
+}
+
+static long msm_axi_subdev_ioctl(struct v4l2_subdev *sd,
+			unsigned int cmd, void *arg)
+{
+	int rc = -ENOIOCTLCMD;
+	switch (cmd) {
+	case VIDIOC_MSM_AXI_INIT:
+		rc = msm_axi_subdev_init(sd,
+			(struct msm_cam_media_controller *)arg);
+		break;
+	case VIDIOC_MSM_AXI_CFG:
+		rc = msm_axi_config(sd, arg);
+		break;
+	case VIDIOC_MSM_AXI_IRQ:
+		msm_axi_process_irq(sd, arg);
+		rc = 0;
+		break;
+	case VIDIOC_MSM_AXI_BUF_CFG:
+		msm_axi_buf_cfg(sd, arg);
+		rc = 0;
+		break;
+	case VIDIOC_MSM_AXI_RELEASE:
+		msm_axi_subdev_release(sd);
+		rc = 0;
+		break;
+	default:
+		pr_err("%s: command not found\n", __func__);
+	}
+	return rc;
+}
+
+static const struct v4l2_subdev_core_ops msm_axi_subdev_core_ops = {
+	.ioctl = msm_axi_subdev_ioctl,
+};
+
+static const struct v4l2_subdev_video_ops msm_axi_subdev_video_ops = {
+	.s_crystal_freq = msm_axi_subdev_s_crystal_freq,
+};
+
+static const struct v4l2_subdev_ops msm_axi_subdev_ops = {
+	.core = &msm_axi_subdev_core_ops,
+	.video = &msm_axi_subdev_video_ops,
+};
+
+static const struct v4l2_subdev_internal_ops msm_axi_internal_ops;
+
+void vfe40_axi_probe(struct axi_ctrl_t *axi_ctrl)
+{
+	struct msm_cam_subdev_info sd_info;
+	v4l2_subdev_init(&axi_ctrl->subdev, &msm_axi_subdev_ops);
+	axi_ctrl->subdev.internal_ops = &msm_axi_internal_ops;
+	axi_ctrl->subdev.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
+	snprintf(axi_ctrl->subdev.name,
+			 sizeof(axi_ctrl->subdev.name), "axi");
+	v4l2_set_subdevdata(&axi_ctrl->subdev, axi_ctrl);
+
+	sd_info.sdev_type = AXI_DEV;
+	sd_info.sd_index = axi_ctrl->pdev->id;
+	sd_info.irq_num = 0;
+	msm_cam_register_subdev_node(&axi_ctrl->subdev, &sd_info);
+}
diff --git a/drivers/platform/msm/qpnp-pwm.c b/drivers/platform/msm/qpnp-pwm.c
index 708d658..6f9af36 100644
--- a/drivers/platform/msm/qpnp-pwm.c
+++ b/drivers/platform/msm/qpnp-pwm.c
@@ -1,4 +1,5 @@
 /* Copyright (c) 2012, Code Aurora Forum. All rights reserved.
+
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -27,6 +28,8 @@
 #include <linux/qpnp/pwm.h>
 
 #define QPNP_LPG_DRIVER_NAME	"qcom,qpnp-pwm"
+#define QPNP_LPG_CHANNEL_BASE	"qpnp-lpg-channel-base"
+#define QPNP_LPG_LUT_BASE	"qpnp-lpg-lut-base"
 
 /* LPG Control for LPG_PATTERN_CONFIG */
 #define QPNP_RAMP_DIRECTION_SHIFT	4
@@ -207,26 +210,19 @@
 
 static RADIX_TREE(lpg_dev_tree, GFP_KERNEL);
 
-struct qpnp_lut_default_config {
-	u32		*duty_pct_list;
-	int		size;
-	int		start_idx;
-};
-
 struct qpnp_lut_config {
-	struct qpnp_lut_default_config def_config;
-	u8		*duty_pct_list;
-	int		list_size;
-	int		lo_index;
-	int		hi_index;
-	int		lut_pause_hi_cnt;
-	int		lut_pause_lo_cnt;
-	int		ramp_step_ms;
-	bool		ramp_direction;
-	bool		pattern_repeat;
-	bool		ramp_toggle;
-	bool		enable_pause_hi;
-	bool		enable_pause_lo;
+	u8	*duty_pct_list;
+	int	list_len;
+	int	lo_index;
+	int	hi_index;
+	int	lut_pause_hi_cnt;
+	int	lut_pause_lo_cnt;
+	int	ramp_step_ms;
+	bool	ramp_direction;
+	bool	pattern_repeat;
+	bool	ramp_toggle;
+	bool	enable_pause_hi;
+	bool	enable_pause_lo;
 };
 
 struct qpnp_lpg_config {
@@ -234,8 +230,6 @@
 	u16			base_addr;
 	u16			lut_base_addr;
 	u16			lut_size;
-	bool			bypass_lut;
-	bool			lpg_configured;
 };
 
 struct qpnp_pwm_config {
@@ -304,6 +298,8 @@
 
 #define QPNP_ENABLE_LUT_CONTROL(p_val)	qpnp_set_control(p_val, 1, 1, 1, 0, 1)
 #define QPNP_ENABLE_PWM_CONTROL(p_val)	qpnp_set_control(p_val, 1, 1, 0, 1, 0)
+#define QPNP_IS_PWM_CONFIG_SELECTED(val) (val & QPNP_PWM_SRC_SELECT_MASK)
+
 
 static inline void qpnp_convert_to_lut_flags(int *flags,
 				struct qpnp_lut_config *l_config)
@@ -316,10 +312,10 @@
 }
 
 static inline void qpnp_set_lut_params(struct lut_params *l_params,
-				struct qpnp_lut_config *l_config)
+		struct qpnp_lut_config *l_config, int s_idx, int size)
 {
-	l_params->start_idx = l_config->def_config.start_idx;
-	l_params->idx_len = l_config->def_config.size;
+	l_params->start_idx = s_idx;
+	l_params->idx_len = size;
 	l_params->lut_pause_hi = l_config->lut_pause_hi_cnt;
 	l_params->lut_pause_lo = l_config->lut_pause_lo_cnt;
 	l_params->ramp_step_ms = l_config->ramp_step_ms;
@@ -442,7 +438,7 @@
 	struct qpnp_lut_config	*lut = &chip->lpg_config.lut_config;
 	int			i, pwm_size, rc = 0;
 	int			burst_size = SPMI_MAX_BUF_LEN;
-	int			list_len = lut->list_size << 1;
+	int			list_len = lut->list_len << 1;
 	int			offset = lut->lo_index << 2;
 
 	pwm_size = QPNP_GET_PWM_SIZE(
@@ -451,15 +447,15 @@
 
 	max_pwm_value = (1 << pwm_size) - 1;
 
-	if (unlikely(lut->list_size != (lut->hi_index - lut->lo_index + 1))) {
+	if (unlikely(lut->list_len != (lut->hi_index - lut->lo_index + 1))) {
 		pr_err("LUT internal Data structure corruption detected\n");
-		pr_err("LUT list size: %d\n", lut->list_size);
+		pr_err("LUT list size: %d\n", lut->list_len);
 		pr_err("However, index size is: %d\n",
 				(lut->hi_index - lut->lo_index + 1));
 		return -EINVAL;
 	}
 
-	for (i = 0; i <= lut->list_size; i++) {
+	for (i = 0; i <= lut->list_len; i++) {
 		if (raw_value)
 			pwm_value = duty_pct[i];
 		else
@@ -597,7 +593,7 @@
 		lpg_config->base_addr, QPNP_LPG_PWM_TYPE_CONFIG, 1, chip);
 }
 
-static int qpnp_pwm_configure_control(struct pwm_device *pwm)
+static int qpnp_configure_pwm_control(struct pwm_device *pwm)
 {
 	struct qpnp_lpg_config	*lpg_config = &pwm->chip->lpg_config;
 	struct qpnp_lpg_chip	*chip = pwm->chip;
@@ -615,7 +611,7 @@
 
 }
 
-static int qpnp_lpg_configure_control(struct pwm_device *pwm)
+static int qpnp_configure_lpg_control(struct pwm_device *pwm)
 {
 	struct qpnp_lpg_config	*lpg_config = &pwm->chip->lpg_config;
 	struct qpnp_lpg_chip	*chip = pwm->chip;
@@ -789,7 +785,7 @@
 		pr_err("Failed to configure LUT pattern");
 		return rc;
 	}
-	rc = qpnp_lpg_configure_control(pwm);
+	rc = qpnp_configure_lpg_control(pwm);
 	if (rc) {
 		pr_err("Failed to configure pause registers");
 		return rc;
@@ -829,7 +825,7 @@
 		lpg_config->base_addr, QPNP_RAMP_CONTROL, 1, chip);
 }
 
-static int qpnp_lpg_disable_lut(struct pwm_device *pwm)
+static int qpnp_disable_lut(struct pwm_device *pwm)
 {
 	struct qpnp_lpg_config	*lpg_config = &pwm->chip->lpg_config;
 	struct qpnp_lpg_chip	*chip = pwm->chip;
@@ -863,7 +859,7 @@
 		lpg_config->base_addr, QPNP_RAMP_CONTROL, 1, chip);
 }
 
-static int qpnp_lpg_disable_pwm(struct pwm_device *pwm)
+static int qpnp_disable_pwm(struct pwm_device *pwm)
 {
 	struct qpnp_lpg_config	*lpg_config = &pwm->chip->lpg_config;
 	struct qpnp_lpg_chip	*chip = pwm->chip;
@@ -914,15 +910,13 @@
 		return rc;
 	}
 
-	rc = qpnp_pwm_configure_control(pwm);
+	rc = qpnp_configure_pwm_control(pwm);
 	if (rc) {
 		pr_err("Could not update PWM control for");
 		pr_err("channel %d rc=%d\n", pwm_config->channel_id, rc);
 		return rc;
 	}
 
-	pwm->chip->lpg_config.lpg_configured = 1;
-
 	pr_debug("duty/period=%u/%u usec: pwm_value=%d (of %d)\n",
 		 (unsigned)duty_us, (unsigned)period_us,
 		 pwm_config->pwm_value, 1 << period->pwm_size);
@@ -935,8 +929,6 @@
 {
 	struct qpnp_lpg_config		*lpg_config;
 	struct qpnp_lut_config		*lut_config;
-	struct qpnp_lut_default_config  *def_lut_config =
-					&lut_config->def_config;
 	struct pwm_period_config	*period;
 	struct qpnp_pwm_config		*pwm_config;
 	int				start_idx = lut_params.start_idx;
@@ -948,23 +940,6 @@
 	pwm_config = &pwm->pwm_config;
 	lpg_config = &pwm->chip->lpg_config;
 	lut_config = &lpg_config->lut_config;
-	def_lut_config = &lut_config->def_config;
-
-	if ((start_idx + len) > lpg_config->lut_size) {
-		pr_err("Exceed LUT limit\n");
-		return -EINVAL;
-	}
-	if ((unsigned)period_us > PM_PWM_PERIOD_MAX ||
-		(unsigned)period_us < PM_PWM_PERIOD_MIN) {
-		pr_err("Period out of range\n");
-		return -EINVAL;
-	}
-
-	if (!pwm_config->in_use) {
-		pr_err("channel_id: %d: stale handle?\n",
-				pwm_config->channel_id);
-		return -EINVAL;
-	}
 
 	period = &pwm_config->period;
 
@@ -981,37 +956,10 @@
 	if (flags & PM_PWM_LUT_USE_RAW_VALUE)
 		raw_lut = 1;
 
-	lut_config->list_size = len;
+	lut_config->list_len = len;
 	lut_config->lo_index = start_idx;
 	lut_config->hi_index = start_idx + len - 1;
 
-	/*
-	 * LUT may not be specified in device tree by default.
-	 * This is the first time user is configuring it.
-	 */
-	if (lpg_config->bypass_lut) {
-		def_lut_config->duty_pct_list = kzalloc(sizeof(u32) *
-							len, GFP_KERNEL);
-		if (!def_lut_config->duty_pct_list) {
-			pr_err("kzalloc failed on def_duty_pct_list\n");
-			return -ENOMEM;
-		}
-
-		lut_config->duty_pct_list = kzalloc(lpg_config->lut_size *
-						sizeof(u16), GFP_KERNEL);
-		if (!lut_config->duty_pct_list) {
-			pr_err("kzalloc failed on duty_pct_list\n");
-			kfree(def_lut_config->duty_pct_list);
-			return -ENOMEM;
-		}
-
-		def_lut_config->size = len;
-		def_lut_config->start_idx = start_idx;
-		memcpy(def_lut_config->duty_pct_list, duty_pct, len);
-
-		lpg_config->bypass_lut = 0;
-	}
-
 	rc = qpnp_lpg_change_table(pwm, duty_pct, raw_lut);
 	if (rc) {
 		pr_err("qpnp_lpg_change_table: rc=%d\n", rc);
@@ -1041,12 +989,28 @@
 	lut_config->ramp_toggle	    = !!(flags & PM_PWM_LUT_REVERSE);
 	lut_config->enable_pause_hi = !!(flags & PM_PWM_LUT_PAUSE_HI_EN);
 	lut_config->enable_pause_lo = !!(flags & PM_PWM_LUT_PAUSE_LO_EN);
-	lpg_config->bypass_lut = 0;
 
 	rc = qpnp_lpg_change_lut(pwm);
 
-	if (!rc)
-		lpg_config->lpg_configured = 1;
+	return rc;
+}
+
+static int _pwm_enable(struct pwm_device *pwm)
+{
+	int rc;
+	struct qpnp_lpg_chip *chip;
+
+	chip = pwm->chip;
+
+	mutex_lock(&pwm->chip->lpg_mutex);
+
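+	/* The source-select bit in the cached enable-control register
+	 * records whether the channel was configured for PWM or LUT (LPG)
+	 * output, so enable the matching path.
+	 */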
+	if (QPNP_IS_PWM_CONFIG_SELECTED(
+		chip->qpnp_lpg_registers[QPNP_ENABLE_CONTROL]))
+		rc = qpnp_lpg_enable_pwm(pwm);
+	else
+		rc = qpnp_lpg_enable_lut(pwm);
+
+	mutex_unlock(&pwm->chip->lpg_mutex);
 
 	return rc;
 }
@@ -1108,11 +1072,10 @@
 	pwm_config = &pwm->pwm_config;
 
 	if (pwm_config->in_use) {
-		qpnp_lpg_disable_pwm(pwm);
-		qpnp_lpg_disable_lut(pwm);
+		qpnp_disable_pwm(pwm);
+		qpnp_disable_lut(pwm);
 		pwm_config->in_use = 0;
 		pwm_config->lable = NULL;
-		pwm->chip->lpg_config.lpg_configured = 0;
 	}
 
 	mutex_unlock(&pwm->chip->lpg_mutex);
@@ -1155,43 +1118,20 @@
 int pwm_enable(struct pwm_device *pwm)
 {
 	struct qpnp_pwm_config	*p_config;
-	struct qpnp_lpg_chip	*chip;
-	int			rc = 0;
 
 	if (pwm == NULL || IS_ERR(pwm) || pwm->chip == NULL) {
 		pr_err("Invalid pwm handle or no pwm_chip\n");
 		return -EINVAL;
 	}
 
-	mutex_lock(&pwm->chip->lpg_mutex);
-
-	chip = pwm->chip;
 	p_config = &pwm->pwm_config;
 
 	if (!p_config->in_use) {
 		pr_err("channel_id: %d: stale handle?\n", p_config->channel_id);
-		rc = -EINVAL;
-		goto out_unlock;
+		return -EINVAL;
 	}
 
-	if (!pwm->chip->lpg_config.lpg_configured) {
-		pr_err("Request received to enable PWM for channel Id: %d\n",
-							p_config->channel_id);
-		pr_err("However, PWM isn't configured\n");
-		pr_err("falling back to defaultconfiguration\n");
-		rc = _pwm_config(pwm, p_config->pwm_duty,
-					p_config->pwm_period);
-		if (rc) {
-			pr_err("Could not apply default PWM config\n");
-			goto out_unlock;
-		}
-	}
-
-	rc = qpnp_lpg_enable_pwm(pwm);
-
-out_unlock:
-	mutex_unlock(&pwm->chip->lpg_mutex);
-	return rc;
+	return _pwm_enable(pwm);
 }
 EXPORT_SYMBOL_GPL(pwm_enable);
 
@@ -1215,21 +1155,50 @@
 	pwm_config = &pwm->pwm_config;
 
 	if (pwm_config->in_use) {
-		if (!pwm->chip->lpg_config.lpg_configured) {
-			pr_err("Request received to disable PWM for\n");
-			pr_err("channel Id: %d\n", pwm_config->channel_id);
-			pr_err("However PWM is not configured by any means\n");
-			goto out_unlock;
-		}
-		qpnp_lpg_disable_pwm(pwm);
+		if (QPNP_IS_PWM_CONFIG_SELECTED(
+			chip->qpnp_lpg_registers[QPNP_ENABLE_CONTROL]))
+			qpnp_disable_pwm(pwm);
+		else
+			qpnp_disable_lut(pwm);
 	}
 
-out_unlock:
 	mutex_unlock(&pwm->chip->lpg_mutex);
 }
 EXPORT_SYMBOL_GPL(pwm_disable);
 
 /**
+ * pwm_change_mode - Change the PWM mode configuration
+ * @pwm: the PWM device
+ * @mode: Mode selection value
+ */
+int pwm_change_mode(struct pwm_device *pwm, enum pm_pwm_mode mode)
+{
+	int rc;
+
+	if (pwm == NULL || IS_ERR(pwm) || pwm->chip == NULL) {
+		pr_err("Invalid pwm handle or no pwm_chip\n");
+		return -EINVAL;
+	}
+
+	if (mode < PM_PWM_MODE_PWM || mode > PM_PWM_MODE_LPG) {
+		pr_err("Invalid mode value\n");
+		return -EINVAL;
+	}
+
+	mutex_lock(&pwm->chip->lpg_mutex);
+
+	if (mode)
+		rc = qpnp_configure_lpg_control(pwm);
+	else
+		rc = qpnp_configure_pwm_control(pwm);
+
+	mutex_unlock(&pwm->chip->lpg_mutex);
+
+	return rc;
+}
+EXPORT_SYMBOL_GPL(pwm_change_mode);
+
+/**
  * pwm_config_period - change PWM period
  *
  * @pwm: the PWM device
@@ -1356,11 +1325,29 @@
 	if (pwm->chip == NULL)
 		return -ENODEV;
 
+	if (!pwm->pwm_config.in_use) {
+		pr_err("channel_id: %d: stale handle?\n",
+				pwm->pwm_config.channel_id);
+		return -EINVAL;
+	}
+
 	if (duty_pct == NULL && !(lut_params.flags & PM_PWM_LUT_NO_TABLE)) {
 		pr_err("Invalid duty_pct with flag\n");
 		return -EINVAL;
 	}
 
+	if ((lut_params.start_idx + lut_params.idx_len) >
+				pwm->chip->lpg_config.lut_size) {
+		pr_err("Exceed LUT limit\n");
+		return -EINVAL;
+	}
+
+	if ((unsigned)period_us > PM_PWM_PERIOD_MAX ||
+		(unsigned)period_us < PM_PWM_PERIOD_MIN) {
+		pr_err("Period out of range\n");
+		return -EINVAL;
+	}
+
 	mutex_lock(&pwm->chip->lpg_mutex);
 
 	rc = _pwm_lut_config(pwm, period_us, duty_pct, lut_params);
@@ -1371,87 +1358,136 @@
 }
 EXPORT_SYMBOL_GPL(pwm_lut_config);
 
-/**
- * pwm_lut_enable - control a PWM device to start/stop LUT ramp
- * @pwm: the PWM device
- * @start: to start (1), or stop (0)
- */
-int pwm_lut_enable(struct pwm_device *pwm, int start)
+static int qpnp_parse_pwm_dt_config(struct device_node *of_pwm_node,
+		struct device_node *of_parent, struct qpnp_lpg_chip *chip)
 {
-	struct qpnp_lpg_config	*lpg_config;
-	struct qpnp_pwm_config	*p_config;
-	struct lut_params	lut_params;
-	int			rc = 0;
+	int rc, period;
+	struct pwm_device *pwm_dev = &chip->pwm_dev;
 
-	if (pwm == NULL || IS_ERR(pwm)) {
-		pr_err("Invalid pwm handle\n");
+	rc = of_property_read_u32(of_parent, "qcom,period", (u32 *)&period);
+	if (rc) {
+		pr_err("node is missing PWM Period prop");
+		return rc;
+	}
+
+	rc = of_property_read_u32(of_pwm_node, "qcom,duty",
+				&pwm_dev->pwm_config.pwm_duty);
+	if (rc) {
+		pr_err("node is missing PWM Duty prop");
+		return rc;
+	}
+
+	rc = _pwm_config(pwm_dev, pwm_dev->pwm_config.pwm_duty, period);
+
+	return rc;
+}
+
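+/*
+ * Helper for optional DT properties: a missing property (-EINVAL) is
+ * tolerated, while any other failure aborts the parse.
+ */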
+#define qpnp_check_optional_dt_bindings(func)	\
+do {					\
+	rc = func;			\
+	if (rc && rc != -EINVAL)	\
+		goto out;		\
+	rc = 0;				\
+} while (0);
+
+static int qpnp_parse_lpg_dt_config(struct device_node *of_lpg_node,
+		struct device_node *of_parent, struct qpnp_lpg_chip *chip)
+{
+	int rc, period, list_size, start_idx, *duty_pct_list;
+	struct pwm_device *pwm_dev = &chip->pwm_dev;
+	struct qpnp_lpg_config	*lpg_config = &chip->lpg_config;
+	struct qpnp_lut_config	*lut_config = &lpg_config->lut_config;
+	struct lut_params	lut_params;
+
+	rc = of_property_read_u32(of_parent, "qcom,period", &period);
+	if (rc) {
+		pr_err("node is missing PWM Period prop");
+		return rc;
+	}
+
+	if (!of_get_property(of_lpg_node, "qcom,duty-percents", &list_size)) {
+		pr_err("node is missing duty-pct list");
+		return rc;
+	}
+
+	rc = of_property_read_u32(of_lpg_node, "cell-index", &start_idx);
+	if (rc) {
+		pr_err("Missing start index");
+		return rc;
+	}
+
+	list_size /= sizeof(u32);
+
+	if (list_size + start_idx > lpg_config->lut_size) {
+		pr_err("duty pct list size overflows\n");
 		return -EINVAL;
 	}
 
-	if (pwm->chip == NULL)
-		return -ENODEV;
+	duty_pct_list = kzalloc(sizeof(u32) * list_size, GFP_KERNEL);
 
-	lpg_config = &pwm->chip->lpg_config;
-	p_config = &pwm->pwm_config;
-
-	mutex_lock(&pwm->chip->lpg_mutex);
-
-	if (start) {
-		if (!lpg_config->lpg_configured) {
-			pr_err("Request received to enable LUT for\n");
-			pr_err("LPG channel %d\n", pwm->pwm_config.channel_id);
-			pr_err("But LPG is not configured, falling back to\n");
-			pr_err(" default LUT configuration if available\n");
-
-			if (lpg_config->bypass_lut) {
-				pr_err("No default LUT configuration found\n");
-				pr_err("Use pwm_lut_config() to configure\n");
-				rc = -EINVAL;
-				goto out;
-			}
-
-			qpnp_set_lut_params(&lut_params,
-					&lpg_config->lut_config);
-
-			rc = _pwm_lut_config(pwm, p_config->pwm_period,
-			(int *)lpg_config->lut_config.def_config.duty_pct_list,
-			lut_params);
-			if (rc) {
-				pr_err("Could not set the default LUT conf\n");
-				goto out;
-			}
-		}
-
-		rc = qpnp_lpg_enable_lut(pwm);
-	} else {
-		if (unlikely(!lpg_config->lpg_configured)) {
-			pr_err("LPG isn't configured\n");
-			rc = -EINVAL;
-			goto out;
-		}
-		rc = qpnp_lpg_disable_lut(pwm);
+	if (!duty_pct_list) {
+		pr_err("kzalloc failed on duty_pct_list\n");
+		return -ENOMEM;
 	}
 
+	rc = of_property_read_u32_array(of_lpg_node, "qcom,duty-percents",
+						duty_pct_list, list_size);
+	if (rc) {
+		pr_err("invalid or missing property:\n");
+		pr_err("qcom,duty-pcts-list\n");
+		kfree(duty_pct_list);
+		return rc;
+	}
+
+	/* Read optional properties */
+	qpnp_check_optional_dt_bindings(of_property_read_u32(of_lpg_node,
+		"qcom,ramp-step-duration", &lut_config->ramp_step_ms));
+	qpnp_check_optional_dt_bindings(of_property_read_u32(of_lpg_node,
+		"qcom,lpg-lut-pause-hi", &lut_config->lut_pause_hi_cnt));
+	qpnp_check_optional_dt_bindings(of_property_read_u32(of_lpg_node,
+		"qcom,lpg-lut-pause-lo", &lut_config->lut_pause_lo_cnt));
+	qpnp_check_optional_dt_bindings(of_property_read_u32(of_lpg_node,
+				"qcom,lpg-lut-ramp-direction",
+				(u32 *)&lut_config->ramp_direction));
+	qpnp_check_optional_dt_bindings(of_property_read_u32(of_lpg_node,
+				"qcom,lpg-lut-pattern-repeat",
+				(u32 *)&lut_config->pattern_repeat));
+	qpnp_check_optional_dt_bindings(of_property_read_u32(of_lpg_node,
+				"qcom,lpg-lut-ramp-toggle",
+				(u32 *)&lut_config->ramp_toggle));
+	qpnp_check_optional_dt_bindings(of_property_read_u32(of_lpg_node,
+				"qcom,lpg-lut-enable-pause-hi",
+				(u32 *)&lut_config->enable_pause_hi));
+	qpnp_check_optional_dt_bindings(of_property_read_u32(of_lpg_node,
+				"qcom,lpg-lut-enable-pause-lo",
+				(u32 *)&lut_config->enable_pause_lo));
+
+	qpnp_set_lut_params(&lut_params, lut_config, start_idx, list_size);
+
+	rc = _pwm_lut_config(pwm_dev, period, duty_pct_list, lut_params);
+
 out:
-	mutex_unlock(&pwm->chip->lpg_mutex);
+	kfree(duty_pct_list);
 	return rc;
 }
-EXPORT_SYMBOL_GPL(pwm_lut_enable);
 
 /* Fill in lpg device elements based on values found in device tree. */
-static int qpnp_lpg_get_dt_config(struct spmi_device *spmi,
+static int qpnp_parse_dt_config(struct spmi_device *spmi,
 					struct qpnp_lpg_chip *chip)
 {
-	int			rc;
+	int			rc, enable;
+	const char		*lable;
 	struct resource		*res;
+	struct device_node	*node;
+	int found_pwm_subnode = 0;
+	int found_lpg_subnode = 0;
 	struct device_node	*of_node = spmi->dev.of_node;
-	struct qpnp_lpg_config	*lpg_config = &chip->lpg_config;
 	struct pwm_device	*pwm_dev = &chip->pwm_dev;
-	struct qpnp_lut_config	*lut_config = &chip->lpg_config.lut_config;
-	struct qpnp_lut_default_config	*def_lut_config =
-						&lut_config->def_config;
+	struct qpnp_lpg_config	*lpg_config = &chip->lpg_config;
+	struct qpnp_lut_config	*lut_config = &lpg_config->lut_config;
 
-	res = spmi_get_resource(spmi, 0, IORESOURCE_MEM, 0);
+	res = spmi_get_resource_byname(spmi, NULL, IORESOURCE_MEM,
+					QPNP_LPG_CHANNEL_BASE);
 	if (!res) {
 		dev_err(&spmi->dev, "%s: node is missing base address\n",
 			__func__);
@@ -1460,7 +1496,8 @@
 
 	lpg_config->base_addr = res->start;
 
-	res = spmi_get_resource(spmi, 0, IORESOURCE_MEM, 1);
+	res = spmi_get_resource_byname(spmi, NULL, IORESOURCE_MEM,
+						QPNP_LPG_LUT_BASE);
 	if (!res) {
 		dev_err(&spmi->dev, "%s: node is missing LUT base address\n",
 								__func__);
@@ -1471,88 +1508,68 @@
 	/* Each entry of LUT is of 2 bytes */
 	lpg_config->lut_size = resource_size(res) >> 1;
 
+	lut_config->duty_pct_list = kzalloc(lpg_config->lut_size *
+						sizeof(u16), GFP_KERNEL);
+	if (!lut_config->duty_pct_list) {
+		pr_err("can not allocate duty pct list\n");
+		return -ENOMEM;
+	}
 
 	rc = of_property_read_u32(of_node, "qcom,channel-id",
 				&pwm_dev->pwm_config.channel_id);
 	if (rc) {
-		dev_err(&spmi->dev, "%s: node is missing LPG channel id",
+		dev_err(&spmi->dev, "%s: node is missing LPG channel id\n",
 								__func__);
-		return rc;
+		goto out;
 	}
 
-	rc = of_property_read_u32(of_node, "qcom,period",
-				&pwm_dev->pwm_config.pwm_period);
-	if (rc) {
-		dev_err(&spmi->dev, "%s: node is missing PWM Period value",
+	for_each_child_of_node(of_node, node) {
+		rc = of_property_read_string(node, "label", &lable);
+		if (rc) {
+			dev_err(&spmi->dev, "%s: Missing lable property\n",
 								__func__);
-		return rc;
+			goto out;
+		}
+		if (!strncmp(lable, "pwm", 3)) {
+			rc = qpnp_parse_pwm_dt_config(node, of_node, chip);
+			if (rc)
+				goto out;
+			found_pwm_subnode = 1;
+		} else if (!strncmp(lable, "lpg", 3)) {
+			rc = qpnp_parse_lpg_dt_config(node, of_node, chip);
+			if (rc)
+				goto out;
+			found_lpg_subnode = 1;
+		} else {
+			dev_err(&spmi->dev, "%s: Invalid value for lable prop",
+								__func__);
+		}
 	}
 
-	if (!of_get_property(of_node, "qcom,duty-percents",
-						&def_lut_config->size)) {
-		lpg_config->bypass_lut = 1;
-	}
-
-	if (lpg_config->bypass_lut)
+	rc = of_property_read_u32(of_node, "qcom,mode-select", &enable);
+	if (rc)
 		goto read_opt_props;
 
-	rc = of_property_read_u32(of_node, "qcom,start-index",
-					&def_lut_config->start_idx);
-
-	if (rc) {
-		dev_err(&spmi->dev, "Missing start index");
-		return rc;
+	if ((enable == PM_PWM_MODE_PWM && found_pwm_subnode == 0) ||
+		(enable == PM_PWM_MODE_LPG && found_lpg_subnode == 0)) {
+		dev_err(&spmi->dev, "%s: Invalid mode select\n", __func__);
+		rc = -EINVAL;
+		goto out;
 	}
 
-	def_lut_config->size /= sizeof(u32);
-
-	def_lut_config->duty_pct_list = kzalloc(sizeof(u32) *
-					def_lut_config->size, GFP_KERNEL);
-	if (!def_lut_config->duty_pct_list) {
-		dev_err(&spmi->dev, "%s: kzalloc failed on duty_pct_list\n",
-								__func__);
-		return -ENOMEM;
-	}
-
-	rc = of_property_read_u32_array(of_node, "qcom,duty-percents",
-		def_lut_config->duty_pct_list, def_lut_config->size);
-	if (rc) {
-		dev_err(&spmi->dev, "invalid or missing property:\n");
-		dev_err(&spmi->dev, "qcom,duty-pcts-list\n");
-		kfree(def_lut_config->duty_pct_list);
-		return rc;
-	}
-
-	lut_config->duty_pct_list = kzalloc(lpg_config->lut_size * sizeof(u16),
-								GFP_KERNEL);
-	if (!lut_config->duty_pct_list) {
-		dev_err(&spmi->dev, "can not allocate duty pct list\n");
-		kfree(def_lut_config->duty_pct_list);
-		return -ENOMEM;
-	}
+	pwm_change_mode(pwm_dev, enable);
+	_pwm_enable(pwm_dev);
 
 read_opt_props:
 	/* Initialize optional config parameters from DT if provided */
-	of_property_read_u32(of_node, "qcom,duty",
-					&pwm_dev->pwm_config.pwm_duty);
-	of_property_read_u32(of_node, "qcom,ramp-step-duration",
-					&lut_config->ramp_step_ms);
-	of_property_read_u32(of_node, "qcom,lpg-lut-pause-hi",
-					&lut_config->lut_pause_hi_cnt);
-	of_property_read_u32(of_node, "qcom,lpg-lut-pause-lo",
-					&lut_config->lut_pause_lo_cnt);
-	of_property_read_u32(of_node, "qcom,lpg-lut-ramp-direction",
-					(u32 *)&lut_config->ramp_direction);
-	of_property_read_u32(of_node, "qcom,lpg-lut-pattern-repeat",
-					(u32 *)&lut_config->pattern_repeat);
-	of_property_read_u32(of_node, "qcom,lpg-lut-ramp-toggle",
-					(u32 *)&lut_config->ramp_toggle);
-	of_property_read_u32(of_node, "qcom,lpg-lut-enable-pause-hi",
-					(u32 *)&lut_config->enable_pause_hi);
-	of_property_read_u32(of_node, "qcom,lpg-lut-enable-pause-lo",
-					(u32 *)&lut_config->enable_pause_lo);
+	of_property_read_string(of_node, "qcom,channel-owner",
+				&pwm_dev->pwm_config.lable);
 
 	return 0;
+
+out:
+	kfree(lut_config->duty_pct_list);
+	return rc;
 }
 
 static int __devinit qpnp_pwm_probe(struct spmi_device *spmi)
@@ -1572,7 +1589,7 @@
 	chip->pwm_dev.chip = chip;
 	dev_set_drvdata(&spmi->dev, chip);
 
-	rc = qpnp_lpg_get_dt_config(spmi, chip);
+	rc = qpnp_parse_dt_config(spmi, chip);
 
 	if (rc)
 		goto failed_config;
@@ -1610,7 +1627,6 @@
 	if (chip) {
 		lpg_config = &chip->lpg_config;
 		kfree(lpg_config->lut_config.duty_pct_list);
-		kfree(lpg_config->lut_config.def_config.duty_pct_list);
 		mutex_destroy(&chip->lpg_mutex);
 		kfree(chip);
 	}
diff --git a/drivers/platform/msm/usb_bam.c b/drivers/platform/msm/usb_bam.c
index d3edfa8..b7c73de 100644
--- a/drivers/platform/msm/usb_bam.c
+++ b/drivers/platform/msm/usb_bam.c
@@ -18,13 +18,14 @@
 #include <linux/io.h>
 #include <linux/stat.h>
 #include <linux/module.h>
+#include <linux/of.h>
 #include <linux/usb/msm_hsusb.h>
 #include <mach/usb_bam.h>
 #include <mach/sps.h>
 #include <linux/workqueue.h>
 
 #define USB_SUMMING_THRESHOLD 512
-#define CONNECTIONS_NUM		4
+#define CONNECTIONS_NUM	4
 
 static struct sps_bam_props usb_props;
 static struct sps_pipe *sps_pipes[CONNECTIONS_NUM][2];
@@ -43,32 +44,35 @@
 
 struct usb_bam_connect_info {
 	u8 idx;
-	u8 *src_pipe;
-	u8 *dst_pipe;
+	u32 *src_pipe;
+	u32 *dst_pipe;
 	struct usb_bam_wake_event_info peer_event;
 	bool enabled;
 };
 
 static struct usb_bam_connect_info usb_bam_connections[CONNECTIONS_NUM];
+static struct usb_bam_pipe_connect ***msm_usb_bam_connections_info;
+static struct usb_bam_pipe_connect *bam_connection_arr;
+
+static bool device_tree_enabled;
 
 static inline int bam_offset(struct msm_usb_bam_platform_data *pdata)
 {
 	return pdata->usb_active_bam * CONNECTIONS_NUM * 2;
 }
 
-static int connect_pipe(u8 connection_idx, enum usb_bam_pipe_dir pipe_dir,
-						u8 *usb_pipe_idx)
+static int connect_pipe(u8 conn_idx, enum usb_bam_pipe_dir pipe_dir,
+						u32 *usb_pipe_idx)
 {
 	int ret;
-	struct sps_pipe **pipe = &sps_pipes[connection_idx][pipe_dir];
+	struct sps_pipe **pipe = &sps_pipes[conn_idx][pipe_dir];
 	struct sps_connect *connection =
-		&sps_connections[connection_idx][pipe_dir];
+		&sps_connections[conn_idx][pipe_dir];
 	struct msm_usb_bam_platform_data *pdata =
-		(struct msm_usb_bam_platform_data *)
-			(usb_bam_pdev->dev.platform_data);
+		usb_bam_pdev->dev.platform_data;
 	struct usb_bam_pipe_connect *pipe_connection =
-			(struct usb_bam_pipe_connect *)(pdata->connections +
-			 bam_offset(pdata) + (2*connection_idx+pipe_dir));
+		(struct usb_bam_pipe_connect *)(pdata->connections +
+			 bam_offset(pdata) + (2*conn_idx+pipe_dir));
 
 	*pipe = sps_alloc_endpoint();
 	if (*pipe == NULL) {
@@ -105,26 +109,54 @@
 		*usb_pipe_idx = connection->dest_pipe_index;
 	}
 
-	ret = sps_setup_bam2bam_fifo(
-				&data_mem_buf[connection_idx][pipe_dir],
+	if (!device_tree_enabled) {
+		ret = sps_setup_bam2bam_fifo(
+				&data_mem_buf[conn_idx][pipe_dir],
 				pipe_connection->data_fifo_base_offset,
 				pipe_connection->data_fifo_size, 1);
-	if (ret) {
-		pr_err("%s: data fifo setup failure %d\n", __func__, ret);
-		goto fifo_setup_error;
-	}
-	connection->data = data_mem_buf[connection_idx][pipe_dir];
+		if (ret) {
+			pr_err("%s: data fifo setup failure %d\n", __func__,
+				ret);
+			goto fifo_setup_error;
+		}
 
-	ret = sps_setup_bam2bam_fifo(
-				&desc_mem_buf[connection_idx][pipe_dir],
+		ret = sps_setup_bam2bam_fifo(
+				&desc_mem_buf[conn_idx][pipe_dir],
 				pipe_connection->desc_fifo_base_offset,
 				pipe_connection->desc_fifo_size, 1);
-	if (ret) {
-		pr_err("%s: desc. fifo setup failure %d\n", __func__, ret);
-		goto fifo_setup_error;
+		if (ret) {
+			pr_err("%s: desc. fifo setup failure %d\n", __func__,
+				ret);
+			goto fifo_setup_error;
+		}
+	} else {
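+		/*
+		 * With device tree, the FIFOs sit at fixed offsets from the
+		 * USB BAM base address, so map and clear them here instead
+		 * of allocating them via sps_setup_bam2bam_fifo().
+		 */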
+		data_mem_buf[conn_idx][pipe_dir].phys_base =
+			pipe_connection->data_fifo_base_offset +
+				pdata->usb_base_address;
+		data_mem_buf[conn_idx][pipe_dir].size =
+			pipe_connection->data_fifo_size;
+		data_mem_buf[conn_idx][pipe_dir].base =
+			ioremap(data_mem_buf[conn_idx][pipe_dir].phys_base,
+				data_mem_buf[conn_idx][pipe_dir].size);
+		memset(data_mem_buf[conn_idx][pipe_dir].base, 0,
+			data_mem_buf[conn_idx][pipe_dir].size);
+
+		desc_mem_buf[conn_idx][pipe_dir].phys_base =
+			pipe_connection->desc_fifo_base_offset +
+				pdata->usb_base_address;
+		desc_mem_buf[conn_idx][pipe_dir].size =
+			pipe_connection->desc_fifo_size;
+		desc_mem_buf[conn_idx][pipe_dir].base =
+			ioremap(desc_mem_buf[conn_idx][pipe_dir].phys_base,
+				desc_mem_buf[conn_idx][pipe_dir].size);
+		memset(desc_mem_buf[conn_idx][pipe_dir].base, 0,
+			desc_mem_buf[conn_idx][pipe_dir].size);
 	}
-	connection->desc = desc_mem_buf[connection_idx][pipe_dir];
+
+	connection->data = data_mem_buf[conn_idx][pipe_dir];
+	connection->desc = desc_mem_buf[conn_idx][pipe_dir];
 	connection->event_thresh = 16;
+	connection->options = SPS_O_AUTO_ENABLE;
 
 	ret = sps_connect(*pipe, connection);
 	if (ret < 0) {
@@ -141,7 +173,22 @@
 	return ret;
 }
 
-int usb_bam_connect(u8 idx, u8 *src_pipe_idx, u8 *dst_pipe_idx)
+
+static int disconnect_pipe(u8 connection_idx, enum usb_bam_pipe_dir pipe_dir,
+						u32 *usb_pipe_idx)
+{
+	struct sps_pipe *pipe = sps_pipes[connection_idx][pipe_dir];
+	struct sps_connect *connection =
+		&sps_connections[connection_idx][pipe_dir];
+
+	sps_disconnect(pipe);
+	sps_free_endpoint(pipe);
+
+	connection->options &= ~SPS_O_AUTO_ENABLE;
+	return 0;
+}
+
+int usb_bam_connect(u8 idx, u32 *src_pipe_idx, u32 *dst_pipe_idx)
 {
 	struct usb_bam_connect_info *connection = &usb_bam_connections[idx];
 	int ret;
@@ -153,7 +200,7 @@
 	}
 
 	if (connection->enabled) {
-		pr_info("%s: connection %d was already established\n",
+		pr_debug("%s: connection %d was already established\n",
 			__func__, idx);
 		return 0;
 	}
@@ -161,19 +208,23 @@
 	connection->dst_pipe = dst_pipe_idx;
 	connection->idx = idx;
 
-	/* open USB -> Peripheral pipe */
-	ret = connect_pipe(connection->idx, USB_TO_PEER_PERIPHERAL,
-					   connection->src_pipe);
-	if (ret) {
-		pr_err("%s: src pipe connection failure\n", __func__);
-		return ret;
+	if (src_pipe_idx) {
+		/* open USB -> Peripheral pipe */
+		ret = connect_pipe(connection->idx, USB_TO_PEER_PERIPHERAL,
+			connection->src_pipe);
+		if (ret) {
+			pr_err("%s: src pipe connection failure\n", __func__);
+			return ret;
+		}
 	}
-	/* open Peripheral -> USB pipe */
-	ret = connect_pipe(connection->idx, PEER_PERIPHERAL_TO_USB,
-				 connection->dst_pipe);
-	if (ret) {
-		pr_err("%s: dst pipe connection failure\n", __func__);
-		return ret;
+	if (dst_pipe_idx) {
+		/* open Peripheral -> USB pipe */
+		ret = connect_pipe(connection->idx, PEER_PERIPHERAL_TO_USB,
+			connection->dst_pipe);
+		if (ret) {
+			pr_err("%s: dst pipe connection failure\n", __func__);
+			return ret;
+		}
 	}
 	connection->enabled = 1;
 
@@ -232,19 +283,259 @@
 	return 0;
 }
 
+int usb_bam_disconnect_pipe(u8 idx)
+{
+	struct usb_bam_connect_info *connection = &usb_bam_connections[idx];
+	int ret;
+
+	if (idx >= CONNECTIONS_NUM) {
+		pr_err("%s: Invalid connection index\n",
+			__func__);
+		return -EINVAL;
+	}
+
+	if (!connection->enabled) {
+		pr_debug("%s: connection %d isn't enabled\n",
+			__func__, idx);
+		return 0;
+	}
+
+	if (connection->src_pipe) {
+		/* close USB -> Peripheral pipe */
+		ret = disconnect_pipe(connection->idx, USB_TO_PEER_PERIPHERAL,
+						   connection->src_pipe);
+		if (ret) {
+			pr_err("%s: src pipe connection failure\n", __func__);
+			return ret;
+		}
+
+	}
+	if (connection->dst_pipe) {
+		/* close Peripheral -> USB pipe */
+		ret = disconnect_pipe(connection->idx, PEER_PERIPHERAL_TO_USB,
+			connection->dst_pipe);
+		if (ret) {
+			pr_err("%s: dst pipe connection failure\n", __func__);
+			return ret;
+		}
+	}
+
+	connection->src_pipe = 0;
+	connection->dst_pipe = 0;
+	connection->enabled = 0;
+
+	return 0;
+}
+
+static int update_connections_info(struct device_node *node, int bam,
+	int conn_num, int dir)
+{
+	int rc;
+	char *key = NULL;
+	uint32_t val = 0;
+
+	struct usb_bam_pipe_connect *pipe_connection;
+
+	pipe_connection = &msm_usb_bam_connections_info[bam][conn_num][dir];
+
+	key = "qcom,src-bam-physical-address";
+	rc = of_property_read_u32(node, key, &val);
+	if (rc)
+		goto err;
+	pipe_connection->src_phy_addr = val;
+
+	key = "qcom,src-bam-pipe-index";
+	rc = of_property_read_u32(node, key, &val);
+	if (rc)
+		goto err;
+	pipe_connection->src_pipe_index = val;
+
+	key = "qcom,dst-bam-physical-address";
+	rc = of_property_read_u32(node, key, &val);
+	if (rc)
+		goto err;
+	pipe_connection->dst_phy_addr = val;
+
+	key = "qcom,dst-bam-pipe-index";
+	rc = of_property_read_u32(node, key, &val);
+	if (rc)
+		goto err;
+	pipe_connection->dst_pipe_index = val;
+
+	key = "qcom,data-fifo-offset";
+	rc = of_property_read_u32(node, key, &val);
+	if (rc)
+		goto err;
+	pipe_connection->data_fifo_base_offset = val;
+
+	key = "qcom,data-fifo-size";
+	rc = of_property_read_u32(node, key, &val);
+	if (rc)
+		goto err;
+	pipe_connection->data_fifo_size = val;
+
+	key = "qcom,descriptor-fifo-offset";
+	rc = of_property_read_u32(node, key, &val);
+	if (rc)
+		goto err;
+	pipe_connection->desc_fifo_base_offset = val;
+
+	key = "qcom,descriptor-fifo-size";
+	rc = of_property_read_u32(node, key, &val);
+	if (rc)
+		goto err;
+	pipe_connection->desc_fifo_size = val;
+
+	return 0;
+
+err:
+	pr_err("%s: Error in name %s key %s\n", __func__,
+		node->full_name, key);
+	return -EFAULT;
+}
+
+static struct msm_usb_bam_platform_data *usb_bam_dt_to_pdata(
+	struct platform_device *pdev)
+{
+	struct msm_usb_bam_platform_data *pdata;
+	struct device_node *node = pdev->dev.of_node;
+	u32 i, j;
+	int conn_num, bam;
+	u8 dir;
+	u8 ncolumns = 2;
+	int bam_amount, rc = 0;
+	u32 pipe_entry = 0;
+	char *key = NULL;
+
+	pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
+	if (!pdata) {
+		pr_err("unable to allocate platform data\n");
+		return NULL;
+	}
+
+	rc = of_property_read_u32(node, "qcom,usb-active-bam",
+		&pdata->usb_active_bam);
+	if (rc) {
+		pr_err("Invalid usb active bam property\n");
+		return NULL;
+	}
+
+	rc = of_property_read_u32(node, "qcom,usb-total-bam-num",
+		&pdata->total_bam_num);
+	if (rc) {
+		pr_err("Invalid usb total bam num property\n");
+		return NULL;
+	}
+
+	rc = of_property_read_u32(node, "qcom,usb-bam-num-pipes",
+		&pdata->usb_bam_num_pipes);
+	if (rc) {
+		pr_err("Invalid usb bam num pipes property\n");
+		return NULL;
+	}
+
+	rc = of_property_read_u32(node, "qcom,usb-base-address",
+		&pdata->usb_base_address);
+	if (rc) {
+		pr_err("Invalid usb base address property\n");
+		return NULL;
+	}
+
+	for_each_child_of_node(pdev->dev.of_node, node)
+		pipe_entry++;
+
+	/*
+	 * we need to know the number of connection, so we will know
+	 * how much memory to allocate
+	 */
+	conn_num = pipe_entry / 2;
+	bam_amount = pdata->total_bam_num;
+
+	if (conn_num > 0 && conn_num < pdata->usb_bam_num_pipes) {
+		/* alloc msm_usb_bam_connections_info */
+		bam_connection_arr = devm_kzalloc(&pdev->dev, bam_amount *
+			conn_num * ncolumns *
+			sizeof(struct usb_bam_pipe_connect), GFP_KERNEL);
+
+		if (!bam_connection_arr)
+			goto err;
+
+		msm_usb_bam_connections_info = devm_kzalloc(&pdev->dev,
+			bam_amount * sizeof(struct usb_bam_pipe_connect **),
+			GFP_KERNEL);
+
+		if (!msm_usb_bam_connections_info)
+			goto err;
+
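+		/*
+		 * Lay a [bam][connection][direction] table of pointers over
+		 * the flat bam_connection_arr allocation.
+		 */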
+		for (j = 0; j < bam_amount; j++) {
+			msm_usb_bam_connections_info[j] =
+				devm_kzalloc(&pdev->dev, conn_num *
+				sizeof(struct usb_bam_pipe_connect *),
+				GFP_KERNEL);
+			for (i = 0; i < conn_num; i++)
+				msm_usb_bam_connections_info[j][i] =
+					bam_connection_arr +
+					(j * conn_num * ncolumns) +
+					(i * ncolumns);
+		}
+
+		/* retrieve device tree parameters */
+		for_each_child_of_node(pdev->dev.of_node, node) {
+			const char *str;
+
+			key = "qcom,usb-bam-type";
+			rc = of_property_read_u32(node, key, &bam);
+			if (rc)
+				goto err;
+
+			rc = of_property_read_string(node, "label", &str);
+			if (rc) {
+				pr_err("Cannot read string\n");
+				goto err;
+			}
+
+			if (strstr(str, "usb-to-peri"))
+				dir = USB_TO_PEER_PERIPHERAL;
+			else if (strstr(str, "peri-to-usb"))
+				dir = PEER_PERIPHERAL_TO_USB;
+			else
+				goto err;
+
+			if (!strcmp(str, "usb-to-peri-qdss-dwc3") ||
+				!strcmp(str, "peri-to-usb-qdss-dwc3"))
+					conn_num = 0;
+			else
+				goto err;
+
+			rc = update_connections_info(node, bam, conn_num, dir);
+			if (rc)
+				goto err;
+		}
+
+		pdata->connections = &msm_usb_bam_connections_info[0][0][0];
+
+	} else {
+		goto err;
+	}
+
+	return pdata;
+err:
+	pr_err("%s: failed\n", __func__);
+	return NULL;
+}
+
 static int usb_bam_init(void)
 {
 	u32 h_usb;
 	int ret;
 	void *usb_virt_addr;
 	struct msm_usb_bam_platform_data *pdata =
-		(struct msm_usb_bam_platform_data *)
-			(usb_bam_pdev->dev.platform_data);
+		usb_bam_pdev->dev.platform_data;
 	struct resource *res;
 	int irq;
 
 	res = platform_get_resource(usb_bam_pdev, IORESOURCE_MEM,
-						pdata->usb_active_bam);
+		pdata->usb_active_bam);
 	if (!res) {
 		dev_err(&usb_bam_pdev->dev, "Unable to get memory resource\n");
 		return -ENODEV;
@@ -266,6 +557,7 @@
 	usb_props.virt_size = resource_size(res);
 	usb_props.irq = irq;
 	usb_props.summing_threshold = USB_SUMMING_THRESHOLD;
+	usb_props.event_threshold = 512;
 	usb_props.num_pipes = pdata->usb_bam_num_pipes;
 
 	ret = sps_register_bam_device(&usb_props, &h_usb);
@@ -286,11 +578,10 @@
 usb_bam_show_enable(struct device *dev, struct device_attribute *attr,
 		    char *buf)
 {
-	struct platform_device *pdev = container_of(dev, struct platform_device,
-						    dev);
+	struct platform_device *pdev =
+		container_of(dev, struct platform_device, dev);
 	struct msm_usb_bam_platform_data *pdata =
-		(struct msm_usb_bam_platform_data *)
-			(usb_bam_pdev->dev.platform_data);
+		usb_bam_pdev->dev.platform_data;
 
 	if (!pdev || !pdata)
 		return 0;
@@ -302,11 +593,10 @@
 				     struct device_attribute *attr,
 				     const char *buf, size_t count)
 {
-	struct platform_device *pdev = container_of(dev, struct platform_device,
-						    dev);
+	struct platform_device *pdev = container_of(dev,
+		struct platform_device, dev);
 	struct msm_usb_bam_platform_data *pdata =
-		(struct msm_usb_bam_platform_data *)
-			(usb_bam_pdev->dev.platform_data);
+		usb_bam_pdev->dev.platform_data;
 	char str[10], *pstr;
 	int ret, i;
 
@@ -336,6 +626,7 @@
 static int usb_bam_probe(struct platform_device *pdev)
 {
 	int ret, i;
+	struct msm_usb_bam_platform_data *pdata;
 
 	dev_dbg(&pdev->dev, "usb_bam_probe\n");
 
@@ -345,9 +636,19 @@
 			usb_bam_wake_work);
 	}
 
-	if (!pdev->dev.platform_data) {
+	if (pdev->dev.of_node) {
+		dev_dbg(&pdev->dev, "device tree enabled\n");
+		device_tree_enabled = 1;
+		pdata = usb_bam_dt_to_pdata(pdev);
+		if (!pdata)
+			return -ENOMEM;
+		pdev->dev.platform_data = pdata;
+	} else if (!pdev->dev.platform_data) {
 		dev_err(&pdev->dev, "missing platform_data\n");
 		return -ENODEV;
+	} else {
+		pdata = pdev->dev.platform_data;
+		device_tree_enabled = 0;
 	}
 	usb_bam_pdev = pdev;
 
@@ -365,6 +666,32 @@
 	return ret;
 }
 
+void get_bam2bam_connection_info(u8 conn_idx, enum usb_bam_pipe_dir pipe_dir,
+	u32 *usb_bam_handle, u32 *usb_bam_pipe_idx, u32 *peer_pipe_idx,
+	struct sps_mem_buffer *desc_fifo, struct sps_mem_buffer *data_fifo)
+{
+	struct sps_connect *connection =
+		&sps_connections[conn_idx][pipe_dir];
+
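+	/* Report the BAM handle and pipe indices as seen from the USB side,
+	 * and hand back copies of the FIFO descriptors for this connection.
+	 */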
+	if (pipe_dir == USB_TO_PEER_PERIPHERAL) {
+		*usb_bam_handle = connection->source;
+		*usb_bam_pipe_idx = connection->src_pipe_index;
+		*peer_pipe_idx = connection->dest_pipe_index;
+	} else {
+		*usb_bam_handle = connection->destination;
+		*usb_bam_pipe_idx = connection->dest_pipe_index;
+		*peer_pipe_idx = connection->src_pipe_index;
+	}
+	if (data_fifo)
+		memcpy(data_fifo, &data_mem_buf[conn_idx][pipe_dir],
+			sizeof(struct sps_mem_buffer));
+	if (desc_fifo)
+		memcpy(desc_fifo, &desc_mem_buf[conn_idx][pipe_dir],
+			sizeof(struct sps_mem_buffer));
+}
+EXPORT_SYMBOL(get_bam2bam_connection_info);
+
 static int usb_bam_remove(struct platform_device *pdev)
 {
 	destroy_workqueue(usb_bam_wq);
@@ -372,10 +699,20 @@
 	return 0;
 }
 
+static const struct of_device_id usb_bam_dt_match[] = {
+	{ .compatible = "qcom,usb-bam-msm",
+	},
+	{}
+};
+MODULE_DEVICE_TABLE(of, usb_bam_dt_match);
+
 static struct platform_driver usb_bam_driver = {
 	.probe = usb_bam_probe,
 	.remove = usb_bam_remove,
-	.driver = { .name = "usb_bam", },
+	.driver		= {
+		.name	= "usb_bam",
+		.of_match_table = usb_bam_dt_match,
+	},
 };
 
 static int __init init(void)
diff --git a/drivers/power/power_supply_core.c b/drivers/power/power_supply_core.c
index e87b4bd..352e60e 100644
--- a/drivers/power/power_supply_core.c
+++ b/drivers/power/power_supply_core.c
@@ -76,12 +76,29 @@
 	if (psy->set_property)
 		return psy->set_property(psy, POWER_SUPPLY_PROP_SCOPE,
 								&ret);
-
 	return -ENXIO;
 }
 EXPORT_SYMBOL_GPL(power_supply_set_scope);
 
 /**
+ * power_supply_set_supply_type - set type of the power supply
+ * @psy:	the power supply to control
+ * @supply_type:	sets type property of power supply
+ */
+int power_supply_set_supply_type(struct power_supply *psy,
+				enum power_supply_type supply_type)
+{
+	const union power_supply_propval ret = {supply_type,};
+
+	if (psy->set_property)
+		return psy->set_property(psy, POWER_SUPPLY_PROP_TYPE,
+								&ret);
+
+	return -ENXIO;
+}
+EXPORT_SYMBOL_GPL(power_supply_set_supply_type);
+
+/**
  * power_supply_set_charge_type - set charge type of the power supply
  * @psy:	the power supply to control
  * @enable:	sets charge type property of power supply
diff --git a/drivers/slimbus/slim-msm-ctrl.c b/drivers/slimbus/slim-msm-ctrl.c
index fa9d1df..6eb5d60 100644
--- a/drivers/slimbus/slim-msm-ctrl.c
+++ b/drivers/slimbus/slim-msm-ctrl.c
@@ -1746,9 +1746,6 @@
 		},
 	};
 
-	if (!dev->use_rx_msgqs)
-		goto init_rx_msgq;
-
 	bam_props.ee = dev->ee;
 	bam_props.virt_addr = dev->bam.base;
 	bam_props.phys_addr = bam_mem->start;
@@ -1784,7 +1781,7 @@
 	ret = msm_slim_init_rx_msgq(dev);
 	if (ret)
 		dev_err(dev->dev, "msm_slim_init_rx_msgq failed 0x%x\n", ret);
-	if (!dev->use_rx_msgqs && bam_handle) {
+	if (ret && bam_handle) {
 		sps_deregister_bam_device(bam_handle);
 		dev->bam.hdl = 0L;
 	}
@@ -1850,6 +1847,7 @@
 	struct resource		*bam_mem, *bam_io;
 	struct resource		*slim_mem, *slim_io;
 	struct resource		*irq, *bam_irq;
+	bool			rxreg_access = false;
 	slim_mem = platform_get_resource_byname(pdev, IORESOURCE_MEM,
 						"slimbus_physical");
 	if (!slim_mem) {
@@ -1922,13 +1920,15 @@
 			dev_err(&pdev->dev, "Cell index not specified:%d", ret);
 			goto err_of_init_failed;
 		}
+		rxreg_access = of_property_read_bool(pdev->dev.of_node,
+					"qcom,rxreg-access");
 		/* Optional properties */
 		ret = of_property_read_u32(pdev->dev.of_node,
 					"qcom,min-clk-gear", &dev->ctrl.min_cg);
 		ret = of_property_read_u32(pdev->dev.of_node,
 					"qcom,max-clk-gear", &dev->ctrl.max_cg);
-		pr_err("min_cg:%d, max_cg:%d, ret:%d", dev->ctrl.min_cg,
-					dev->ctrl.max_cg, ret);
+		pr_debug("min_cg:%d, max_cg:%d, rxreg: %d", dev->ctrl.min_cg,
+					dev->ctrl.max_cg, rxreg_access);
 	} else {
 		dev->ctrl.nr = pdev->id;
 	}
@@ -1947,7 +1947,11 @@
 	mutex_init(&dev->tx_lock);
 	spin_lock_init(&dev->rx_lock);
 	dev->ee = 1;
-	dev->use_rx_msgqs = 1;
+	if (rxreg_access)
+		dev->use_rx_msgqs = 0;
+	else
+		dev->use_rx_msgqs = 1;
+
 	dev->irq = irq->start;
 	dev->bam.irq = bam_irq->start;
 
diff --git a/drivers/usb/gadget/android.c b/drivers/usb/gadget/android.c
index ac88636..49d7c0f 100644
--- a/drivers/usb/gadget/android.c
+++ b/drivers/usb/gadget/android.c
@@ -99,7 +99,7 @@
 	char *dev_name;
 	struct device_attribute **attributes;
 
-	/* for android_dev.enabled_functions */
+	/* for android_conf.enabled_functions */
 	struct list_head enabled_list;
 
 	struct android_dev *android_dev;
@@ -127,8 +127,8 @@
 };
 
 struct android_dev {
+	const char *name;
 	struct android_usb_function **functions;
-	struct list_head enabled_functions;
 	struct usb_composite_dev *cdev;
 	struct device *dev;
 
@@ -143,9 +143,23 @@
 	struct pm_qos_request pm_qos_req_dma;
 	struct work_struct work;
 
+	/* A list of struct android_configuration */
+	struct list_head configs;
+	int configs_num;
+
+	/* A list node inside the android_dev_list */
 	struct list_head list_item;
 
-	struct usb_configuration config;
+};
+
+struct android_configuration {
+	struct usb_configuration usb_config;
+
+	/* A list of the functions supported by this config */
+	struct list_head enabled_functions;
+
+	/* A list node inside the struct android_dev.configs list */
+	struct list_head list_item;
 };
 
 static struct class *android_class;
@@ -154,6 +168,10 @@
 static int android_bind_config(struct usb_configuration *c);
 static void android_unbind_config(struct usb_configuration *c);
 static struct android_dev *cdev_to_android_dev(struct usb_composite_dev *cdev);
+static struct android_configuration *alloc_android_config
+						(struct android_dev *dev);
+static void free_android_config(struct android_dev *dev,
+				struct android_configuration *conf);
 
 /* string IDs are assigned dynamically */
 #define STRING_MANUFACTURER_IDX		0
@@ -296,13 +314,17 @@
 static void android_enable(struct android_dev *dev)
 {
 	struct usb_composite_dev *cdev = dev->cdev;
+	struct android_configuration *conf;
 
 	if (WARN_ON(!dev->disable_depth))
 		return;
 
 	if (--dev->disable_depth == 0) {
-		usb_add_config(cdev, &dev->config,
-					android_bind_config);
+
+		list_for_each_entry(conf, &dev->configs, list_item)
+			usb_add_config(cdev, &conf->usb_config,
+						android_bind_config);
+
 		usb_gadget_connect(cdev->gadget);
 	}
 }
@@ -310,12 +332,15 @@
 static void android_disable(struct android_dev *dev)
 {
 	struct usb_composite_dev *cdev = dev->cdev;
+	struct android_configuration *conf;
 
 	if (dev->disable_depth++ == 0) {
 		usb_gadget_disconnect(cdev->gadget);
 		/* Cancel pending control requests */
 		usb_ep_dequeue(cdev->gadget->ep0, cdev->req);
-		usb_remove_config(cdev, &dev->config);
+
+		list_for_each_entry(conf, &dev->configs, list_item)
+			usb_remove_config(cdev, &conf->usb_config);
 	}
 }
 
@@ -1555,9 +1580,11 @@
 			       struct usb_configuration *c)
 {
 	struct android_usb_function *f;
+	struct android_configuration *conf =
+		container_of(c, struct android_configuration, usb_config);
 	int ret;
 
-	list_for_each_entry(f, &dev->enabled_functions, enabled_list) {
+	list_for_each_entry(f, &conf->enabled_functions, enabled_list) {
 		ret = f->bind_config(f, c);
 		if (ret) {
 			pr_err("%s: %s failed", __func__, f->name);
@@ -1572,25 +1599,30 @@
 			       struct usb_configuration *c)
 {
 	struct android_usb_function *f;
+	struct android_configuration *conf =
+		container_of(c, struct android_configuration, usb_config);
 
-	list_for_each_entry(f, &dev->enabled_functions, enabled_list) {
+	list_for_each_entry(f, &conf->enabled_functions, enabled_list) {
 		if (f->unbind_config)
 			f->unbind_config(f, c);
 	}
 }
 
-static int android_enable_function(struct android_dev *dev, char *name)
+static int android_enable_function(struct android_dev *dev,
+				   struct android_configuration *conf,
+				   char *name)
 {
 	struct android_usb_function **functions = dev->functions;
 	struct android_usb_function *f;
 	while ((f = *functions++)) {
 		if (!strcmp(name, f->name)) {
 			if (f->android_dev)
-				pr_err("%s cannot be enabled on two devices\n",
+				pr_err("%s already enabled in other "
+					"configuration or device\n",
 					f->name);
 			else {
 				list_add_tail(&f->enabled_list,
-					      &dev->enabled_functions);
+					      &conf->enabled_functions);
 				f->android_dev = dev;
 				return 0;
 			}
@@ -1606,9 +1638,20 @@
 		struct device_attribute *attr, char *buf)
 {
 	struct android_dev *dev = dev_get_drvdata(pdev);
+	struct android_configuration *conf;
+
+	/*
+	 * Show the wakeup attribute of the first configuration,
+	 * since all configurations have the same wakeup attribute
+	 */
+	if (dev->configs_num == 0)
+		return 0;
+	conf = list_entry(dev->configs.next,
+			  struct android_configuration,
+			  list_item);
 
 	return snprintf(buf, PAGE_SIZE, "%d\n",
-			!!(dev->config.bmAttributes &
+			!!(conf->usb_config.bmAttributes &
 				USB_CONFIG_ATT_WAKEUP));
 }
 
@@ -1616,6 +1659,7 @@
 		struct device_attribute *attr, const char *buff, size_t size)
 {
 	struct android_dev *dev = dev_get_drvdata(pdev);
+	struct android_configuration *conf;
 	int enable = 0;
 
 	sscanf(buff, "%d", &enable);
@@ -1623,10 +1667,13 @@
 	pr_debug("android_usb: %s remote wakeup\n",
 			enable ? "enabling" : "disabling");
 
-	if (enable)
-		dev->config.bmAttributes |= USB_CONFIG_ATT_WAKEUP;
-	else
-		dev->config.bmAttributes &= ~USB_CONFIG_ATT_WAKEUP;
+	list_for_each_entry(conf, &dev->configs, list_item)
+		if (enable)
+			conf->usb_config.bmAttributes |=
+					USB_CONFIG_ATT_WAKEUP;
+		else
+			conf->usb_config.bmAttributes &=
+					~USB_CONFIG_ATT_WAKEUP;
 
 	return size;
 }
@@ -1635,13 +1682,18 @@
 functions_show(struct device *pdev, struct device_attribute *attr, char *buf)
 {
 	struct android_dev *dev = dev_get_drvdata(pdev);
+	struct android_configuration *conf;
 	struct android_usb_function *f;
 	char *buff = buf;
 
 	mutex_lock(&dev->mutex);
 
-	list_for_each_entry(f, &dev->enabled_functions, enabled_list)
-		buff += snprintf(buff, PAGE_SIZE, "%s,", f->name);
+	list_for_each_entry(conf, &dev->configs, list_item) {
+		if (buff != buf)
+			*(buff-1) = ':';
+		list_for_each_entry(f, &conf->enabled_functions, enabled_list)
+			buff += snprintf(buff, PAGE_SIZE, "%s,", f->name);
+	}
 
 	mutex_unlock(&dev->mutex);
 
@@ -1656,6 +1708,9 @@
 {
 	struct android_dev *dev = dev_get_drvdata(pdev);
 	struct android_usb_function *f;
+	struct list_head *curr_conf = &dev->configs;
+	struct android_configuration *conf;
+	char *conf_str;
 	char *name;
 	char buf[256], *b;
 	int err;
@@ -1668,21 +1723,45 @@
 	}
 
 	/* Clear previous enabled list */
-	list_for_each_entry(f, &dev->enabled_functions, enabled_list) {
-		f->android_dev = NULL;
+	list_for_each_entry(conf, &dev->configs, list_item) {
+		list_for_each_entry(f, &conf->enabled_functions, enabled_list)
+			f->android_dev = NULL;
+		INIT_LIST_HEAD(&conf->enabled_functions);
 	}
-	INIT_LIST_HEAD(&dev->enabled_functions);
 
 	strlcpy(buf, buff, sizeof(buf));
 	b = strim(buf);
 
 	while (b) {
-		name = strsep(&b, ",");
-		if (name) {
-			err = android_enable_function(dev, name);
-			if (err)
-				pr_err("android_usb: Cannot enable '%s'", name);
+		conf_str = strsep(&b, ":");
+		if (conf_str) {
+			/* If the next entry is not the list head, reuse it */
+			if (curr_conf->next != &dev->configs)
+				conf = list_entry(curr_conf->next,
+						  struct android_configuration,
+						  list_item);
+			else
+				conf = alloc_android_config(dev);
+
+			curr_conf = curr_conf->next;
 		}
+
+		while (conf_str) {
+			name = strsep(&conf_str, ",");
+			if (name) {
+				err = android_enable_function(dev, conf, name);
+				if (err)
+					pr_err("android_usb: Cannot enable %s",
+						name);
+			}
+		}
+	}
+
+	/* Free unneeded configurations, if any */
+	while (curr_conf->next != &dev->configs) {
+		conf = list_entry(curr_conf->next,
+				  struct android_configuration, list_item);
+		free_android_config(dev, conf);
 	}
 
 	mutex_unlock(&dev->mutex);
@@ -1704,6 +1783,7 @@
 	struct android_dev *dev = dev_get_drvdata(pdev);
 	struct usb_composite_dev *cdev = dev->cdev;
 	struct android_usb_function *f;
+	struct android_configuration *conf;
 	int enabled = 0;
 
 	if (!cdev)
@@ -1723,18 +1803,22 @@
 		cdev->desc.bDeviceClass = device_desc.bDeviceClass;
 		cdev->desc.bDeviceSubClass = device_desc.bDeviceSubClass;
 		cdev->desc.bDeviceProtocol = device_desc.bDeviceProtocol;
-		list_for_each_entry(f, &dev->enabled_functions, enabled_list) {
-			if (f->enable)
-				f->enable(f);
-		}
+		list_for_each_entry(conf, &dev->configs, list_item)
+			list_for_each_entry(f, &conf->enabled_functions,
+						enabled_list) {
+				if (f->enable)
+					f->enable(f);
+			}
 		android_enable(dev);
 		dev->enabled = true;
 	} else if (!enabled && dev->enabled) {
 		android_disable(dev);
-		list_for_each_entry(f, &dev->enabled_functions, enabled_list) {
-			if (f->disable)
-				f->disable(f);
-		}
+		list_for_each_entry(conf, &dev->configs, list_item)
+			list_for_each_entry(f, &conf->enabled_functions,
+						enabled_list) {
+				if (f->disable)
+					f->disable(f);
+			}
 		dev->enabled = false;
 	} else {
 		pr_err("android_usb: already %s\n",
@@ -1890,6 +1974,7 @@
 {
 	struct android_dev *dev;
 	struct usb_gadget	*gadget = cdev->gadget;
+	struct android_configuration *conf;
 	int			gcnum, id, ret;
 
 	/* Bind to the last android_dev that was probed */
@@ -1938,7 +2023,8 @@
 	device_desc.iSerialNumber = id;
 
 	if (gadget_is_otg(cdev->gadget))
-		dev->config.descriptors = otg_desc;
+		list_for_each_entry(conf, &dev->configs, list_item)
+			conf->usb_config.descriptors = otg_desc;
 
 	gcnum = usb_gadget_controller_number(gadget);
 	if (gcnum >= 0)
@@ -1979,6 +2065,7 @@
 	struct android_dev		*dev = cdev_to_android_dev(cdev);
 	struct usb_request		*req = cdev->req;
 	struct android_usb_function	*f;
+	struct android_configuration	*conf;
 	int value = -EOPNOTSUPP;
 	unsigned long flags;
 
@@ -1987,13 +2074,16 @@
 	req->length = 0;
 	gadget->ep0->driver_data = cdev;
 
-	list_for_each_entry(f, &dev->enabled_functions, enabled_list) {
-		if (f->ctrlrequest) {
-			value = f->ctrlrequest(f, cdev, c);
-			if (value >= 0)
-				break;
-		}
-	}
+	list_for_each_entry(conf, &dev->configs, list_item)
+		if (&conf->usb_config == cdev->config)
+			list_for_each_entry(f,
+					    &conf->enabled_functions,
+					    enabled_list)
+				if (f->ctrlrequest) {
+					value = f->ctrlrequest(f, cdev, c);
+					if (value >= 0)
+						break;
+				}
 
 	/* Special case the accessory function.
 	 * It needs to handle control requests before it is enabled.
@@ -2084,6 +2174,38 @@
 	return dev;
 }
 
+static struct android_configuration *alloc_android_config
+						(struct android_dev *dev)
+{
+	struct android_configuration *conf;
+
+	conf = kzalloc(sizeof(*conf), GFP_KERNEL);
+	if (!conf) {
+		pr_err("%s(): Failed to alloc memory for android conf\n",
+			__func__);
+		return ERR_PTR(-ENOMEM);
+	}
+
+	dev->configs_num++;
+	conf->usb_config.label = dev->name;
+	conf->usb_config.unbind = android_unbind_config;
+	conf->usb_config.bConfigurationValue = dev->configs_num;
+
+	INIT_LIST_HEAD(&conf->enabled_functions);
+
+	list_add_tail(&conf->list_item, &dev->configs);
+
+	return conf;
+}
+
+static void free_android_config(struct android_dev *dev,
+			     struct android_configuration *conf)
+{
+	list_del(&conf->list_item);
+	dev->configs_num--;
+	kfree(conf);
+}
+
 static int __devinit android_probe(struct platform_device *pdev)
 {
 	struct android_usb_platform_data *pdata = pdev->dev.platform_data;
@@ -2104,12 +2226,11 @@
 		goto err_alloc;
 	}
 
-	android_dev->config.label = pdev->name;
-	android_dev->config.unbind = android_unbind_config;
-	android_dev->config.bConfigurationValue = 1;
+	android_dev->name = pdev->name;
 	android_dev->disable_depth = 1;
 	android_dev->functions = supported_functions;
-	INIT_LIST_HEAD(&android_dev->enabled_functions);
+	android_dev->configs_num = 0;
+	INIT_LIST_HEAD(&android_dev->configs);
 	INIT_WORK(&android_dev->work, android_work);
 	mutex_init(&android_dev->mutex);
 
diff --git a/drivers/usb/gadget/u_bam.c b/drivers/usb/gadget/u_bam.c
index 1fade88..a9e5d91 100644
--- a/drivers/usb/gadget/u_bam.c
+++ b/drivers/usb/gadget/u_bam.c
@@ -98,8 +98,8 @@
 	struct usb_request	*rx_req;
 	struct usb_request	*tx_req;
 
-	u8					src_pipe_idx;
-	u8					dst_pipe_idx;
+	u32					src_pipe_idx;
+	u32					dst_pipe_idx;
 	u8					connection_idx;
 
 	/* stats */
diff --git a/drivers/usb/gadget/u_bam_data.c b/drivers/usb/gadget/u_bam_data.c
index 73b4e75..a105f5d 100644
--- a/drivers/usb/gadget/u_bam_data.c
+++ b/drivers/usb/gadget/u_bam_data.c
@@ -50,8 +50,8 @@
 	struct usb_request	*rx_req;
 	struct usb_request	*tx_req;
 
-	u8			src_pipe_idx;
-	u8			dst_pipe_idx;
+	u32			src_pipe_idx;
+	u32			dst_pipe_idx;
 	u8			connection_idx;
 };
 
diff --git a/drivers/usb/host/ehci-msm2.c b/drivers/usb/host/ehci-msm2.c
index 8a87a6a..c612cb9 100644
--- a/drivers/usb/host/ehci-msm2.c
+++ b/drivers/usb/host/ehci-msm2.c
@@ -54,6 +54,10 @@
 	bool					async_int;
 	bool					vbus_on;
 	atomic_t				in_lpm;
+	int					pmic_gpio_dp_irq;
+	bool					pmic_gpio_dp_irq_enabled;
+	uint32_t				pmic_gpio_int_cnt;
+	atomic_t				pm_usage_cnt;
 	struct wake_lock			wlock;
 };
 
@@ -603,6 +607,11 @@
 
 	atomic_set(&mhcd->in_lpm, 1);
 	enable_irq(hcd->irq);
+	if (mhcd->pmic_gpio_dp_irq) {
+		mhcd->pmic_gpio_dp_irq_enabled = 1;
+		enable_irq_wake(mhcd->pmic_gpio_dp_irq);
+		enable_irq(mhcd->pmic_gpio_dp_irq);
+	}
 	wake_unlock(&mhcd->wlock);
 
 	dev_info(mhcd->dev, "EHCI USB in low power mode\n");
@@ -622,6 +631,11 @@
 		return 0;
 	}
 
+	if (mhcd->pmic_gpio_dp_irq_enabled) {
+		disable_irq_wake(mhcd->pmic_gpio_dp_irq);
+		disable_irq_nosync(mhcd->pmic_gpio_dp_irq);
+		mhcd->pmic_gpio_dp_irq_enabled = 0;
+	}
 	wake_lock(&mhcd->wlock);
 
 	/* Vote for TCXO when waking up the phy */
@@ -669,6 +683,11 @@
 		enable_irq(hcd->irq);
 	}
 
+	if (atomic_read(&mhcd->pm_usage_cnt)) {
+		atomic_set(&mhcd->pm_usage_cnt, 0);
+		pm_runtime_put_noidle(mhcd->dev);
+	}
+
 	dev_info(mhcd->dev, "EHCI USB exited from low power mode\n");
 
 	return 0;
@@ -689,6 +708,32 @@
 	return ehci_irq(hcd);
 }
 
+static irqreturn_t msm_ehci_host_wakeup_irq(int irq, void *data)
+{
+	struct msm_hcd *mhcd = data;
+
+	mhcd->pmic_gpio_int_cnt++;
+	dev_dbg(mhcd->dev, "%s: hsusb host remote wakeup interrupt cnt: %u\n",
+			__func__, mhcd->pmic_gpio_int_cnt);
+
+	wake_lock(&mhcd->wlock);
+
+	if (mhcd->pmic_gpio_dp_irq_enabled) {
+		mhcd->pmic_gpio_dp_irq_enabled = 0;
+		disable_irq_wake(irq);
+		disable_irq_nosync(irq);
+	}
+
+	if (!atomic_read(&mhcd->pm_usage_cnt)) {
+		atomic_set(&mhcd->pm_usage_cnt, 1);
+		pm_runtime_get(mhcd->dev);
+	}
+
+	return IRQ_HANDLED;
+}
+
 static int msm_ehci_reset(struct usb_hcd *hcd)
 {
 	struct ehci_hcd *ehci = hcd_to_ehci(hcd);
@@ -952,6 +997,22 @@
 	 * hence, runtime framework automatically calls this driver's
 	 * runtime APIs based on root-hub's state.
 	 */
+	/* configure pmic_gpio_irq for D+ change */
+	if (pdata && pdata->pmic_gpio_dp_irq)
+		mhcd->pmic_gpio_dp_irq = pdata->pmic_gpio_dp_irq;
+	if (mhcd->pmic_gpio_dp_irq) {
+		ret = request_threaded_irq(mhcd->pmic_gpio_dp_irq, NULL,
+				msm_ehci_host_wakeup_irq,
+				IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
+				"msm_ehci_host_wakeup", mhcd);
+		if (!ret) {
+			disable_irq_nosync(mhcd->pmic_gpio_dp_irq);
+		} else {
+			dev_err(&pdev->dev, "request_irq(%d) failed: %d\n",
+					mhcd->pmic_gpio_dp_irq, ret);
+			mhcd->pmic_gpio_dp_irq = 0;
+		}
+	}
 	pm_runtime_set_active(&pdev->dev);
 	pm_runtime_enable(&pdev->dev);
 
@@ -984,6 +1045,11 @@
 	struct usb_hcd *hcd = platform_get_drvdata(pdev);
 	struct msm_hcd *mhcd = hcd_to_mhcd(hcd);
 
+	if (mhcd->pmic_gpio_dp_irq) {
+		if (mhcd->pmic_gpio_dp_irq_enabled)
+			disable_irq_wake(mhcd->pmic_gpio_dp_irq);
+		free_irq(mhcd->pmic_gpio_dp_irq, mhcd);
+	}
 	device_init_wakeup(&pdev->dev, 0);
 	pm_runtime_disable(&pdev->dev);
 	pm_runtime_set_suspended(&pdev->dev);
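
The PMIC GPIO D+ interrupt added to ehci-msm2 follows an arm/disarm pattern: the IRQ is enabled as a wake source only while the controller sits in low power mode, and the threaded handler disarms it, grabs a wakelock and takes one runtime-PM reference that the LPM-exit path later releases through pm_runtime_put_noidle(). A condensed sketch of that pairing; it reuses the driver's private struct msm_hcd fields and omits locking and error handling:

	#include <linux/interrupt.h>
	#include <linux/pm_runtime.h>

	/* entering LPM: arm the D+ wake interrupt */
	static void example_arm_wake_irq(struct msm_hcd *mhcd)
	{
		mhcd->pmic_gpio_dp_irq_enabled = 1;
		enable_irq_wake(mhcd->pmic_gpio_dp_irq);
		enable_irq(mhcd->pmic_gpio_dp_irq);
	}

	/* threaded handler: disarm it and keep the controller awake */
	static irqreturn_t example_wake_handler(int irq, void *data)
	{
		struct msm_hcd *mhcd = data;

		disable_irq_wake(irq);
		disable_irq_nosync(irq);
		mhcd->pmic_gpio_dp_irq_enabled = 0;
		pm_runtime_get(mhcd->dev);	/* dropped on LPM exit via
						 * pm_runtime_put_noidle() */
		return IRQ_HANDLED;
	}
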
diff --git a/drivers/usb/misc/Kconfig b/drivers/usb/misc/Kconfig
index 13828e0..3aa2e5c 100644
--- a/drivers/usb/misc/Kconfig
+++ b/drivers/usb/misc/Kconfig
@@ -293,3 +293,13 @@
 	  driver for dial up network and RMNET.
 	  To compile this driver as a module, choose M here: the module
 	  will be called mdm_bridge. If unsure, choose N.
+
+config USB_QCOM_KS_BRIDGE
+	tristate "USB Qualcomm kick start bridge"
+	depends on USB
+	help
+	  Say Y here if you have a Qualcomm modem device connected via USB that
+	  will be bridged in kernel space. This driver works as a bridge to pass
+	  boot images, ram-dumps and EFS sync data.
+	  To compile this driver as a module, choose M here: the module
+	  will be called ks_bridge. If unsure, choose N.
diff --git a/drivers/usb/misc/Makefile b/drivers/usb/misc/Makefile
index b4aee65..447e4d2 100644
--- a/drivers/usb/misc/Makefile
+++ b/drivers/usb/misc/Makefile
@@ -33,3 +33,4 @@
 obj-$(CONFIG_USB_QCOM_DIAG_BRIDGE_TEST)	+= diag_bridge_test.o
 mdm_bridge-y				:= mdm_ctrl_bridge.o mdm_data_bridge.o
 obj-$(CONFIG_USB_QCOM_MDM_BRIDGE) 	+= mdm_bridge.o
+obj-$(CONFIG_USB_QCOM_KS_BRIDGE)	+= ks_bridge.o
diff --git a/drivers/usb/misc/ks_bridge.c b/drivers/usb/misc/ks_bridge.c
new file mode 100644
index 0000000..10cbe59
--- /dev/null
+++ b/drivers/usb/misc/ks_bridge.c
@@ -0,0 +1,805 @@
+/*
+ * Copyright (c) 2012, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+/* add additional information to our printk's */
+#define pr_fmt(fmt) "%s: " fmt "\n", __func__
+
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/kref.h>
+#include <linux/platform_device.h>
+#include <linux/ratelimit.h>
+#include <linux/uaccess.h>
+#include <linux/usb.h>
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+#include <linux/miscdevice.h>
+#include <linux/list.h>
+#include <linux/wait.h>
+
+#define DRIVER_DESC	"USB host ks bridge driver"
+#define DRIVER_VERSION	"1.0"
+
+struct data_pkt {
+	int			n_read;
+	char			*buf;
+	size_t			len;
+	struct list_head	list;
+	void			*ctxt;
+};
+
+#define FILE_OPENED		BIT(0)
+#define USB_DEV_CONNECTED	BIT(1)
+#define NO_RX_REQS		10
+#define NO_BRIDGE_INSTANCES	2
+#define BOOT_BRIDGE_INDEX	0
+#define EFS_BRIDGE_INDEX	1
+#define MAX_DATA_PKT_SIZE	16384
+
+struct ks_bridge {
+	char			*name;
+	spinlock_t		lock;
+	struct workqueue_struct	*wq;
+	struct work_struct	to_mdm_work;
+	struct work_struct	start_rx_work;
+	struct list_head	to_mdm_list;
+	struct list_head	to_ks_list;
+	wait_queue_head_t	ks_wait_q;
+
+	/* usb specific */
+	struct usb_device	*udev;
+	struct usb_interface	*ifc;
+	__u8			in_epAddr;
+	__u8			out_epAddr;
+	unsigned int		in_pipe;
+	unsigned int		out_pipe;
+	struct usb_anchor	submitted;
+
+	unsigned long		flags;
+	unsigned int		alloced_read_pkts;
+
+#define DBG_MSG_LEN   40
+#define DBG_MAX_MSG   500
+	unsigned int	dbg_idx;
+	rwlock_t	dbg_lock;
+	char     (dbgbuf[DBG_MAX_MSG])[DBG_MSG_LEN];   /* buffer */
+};
+struct ks_bridge *__ksb[NO_BRIDGE_INSTANCES];
+
+/* by default debugging is enabled */
+static unsigned int enable_dbg = 1;
+module_param(enable_dbg, uint, S_IRUGO | S_IWUSR);
+
+static void
+dbg_log_event(struct ks_bridge *ksb, char *event, int d1, int d2)
+{
+	unsigned long flags;
+	unsigned long long t;
+	unsigned long nanosec;
+
+	if (!enable_dbg)
+		return;
+
+	write_lock_irqsave(&ksb->dbg_lock, flags);
+	t = cpu_clock(smp_processor_id());
+	nanosec = do_div(t, 1000000000)/1000;
+	scnprintf(ksb->dbgbuf[ksb->dbg_idx], DBG_MSG_LEN, "%5lu.%06lu:%s:%x:%x",
+			(unsigned long)t, nanosec, event, d1, d2);
+
+	ksb->dbg_idx++;
+	ksb->dbg_idx = ksb->dbg_idx % DBG_MAX_MSG;
+	write_unlock_irqrestore(&ksb->dbg_lock, flags);
+}
+
+static
+struct data_pkt *ksb_alloc_data_pkt(size_t count, gfp_t flags, void *ctxt)
+{
+	struct data_pkt *pkt;
+
+	pkt = kzalloc(sizeof(struct data_pkt), flags);
+	if (!pkt) {
+		pr_err("failed to allocate data packet\n");
+		return ERR_PTR(-ENOMEM);
+	}
+
+	pkt->buf = kmalloc(count, flags);
+	if (!pkt->buf) {
+		pr_err("failed to allocate data buffer\n");
+		kfree(pkt);
+		return ERR_PTR(-ENOMEM);
+	}
+
+	pkt->len = count;
+	INIT_LIST_HEAD(&pkt->list);
+	pkt->ctxt = ctxt;
+
+	return pkt;
+}
+
+static void ksb_free_data_pkt(struct data_pkt *pkt)
+{
+	kfree(pkt->buf);
+	kfree(pkt);
+}
+
+
+static ssize_t ksb_fs_read(struct file *fp, char __user *buf,
+				size_t count, loff_t *pos)
+{
+	int ret;
+	unsigned long flags;
+	struct ks_bridge *ksb = fp->private_data;
+	struct data_pkt *pkt;
+	size_t space, copied;
+
+read_start:
+	if (!test_bit(USB_DEV_CONNECTED, &ksb->flags))
+		return -ENODEV;
+
+	spin_lock_irqsave(&ksb->lock, flags);
+	if (list_empty(&ksb->to_ks_list)) {
+		spin_unlock_irqrestore(&ksb->lock, flags);
+		ret = wait_event_interruptible(ksb->ks_wait_q,
+				!list_empty(&ksb->to_ks_list) ||
+				!test_bit(USB_DEV_CONNECTED, &ksb->flags));
+		if (ret < 0)
+			return ret;
+
+		goto read_start;
+	}
+
+	space = count;
+	copied = 0;
+	while (!list_empty(&ksb->to_ks_list) && space) {
+		size_t len;
+
+		pkt = list_first_entry(&ksb->to_ks_list, struct data_pkt, list);
+		len = min_t(size_t, space, pkt->len);
+		pkt->n_read += len;
+		spin_unlock_irqrestore(&ksb->lock, flags);
+
+		ret = copy_to_user(buf + copied, pkt->buf, len);
+		if (ret) {
+			pr_err("copy_to_user failed err:%d\n", ret);
+			ksb_free_data_pkt(pkt);
+			ksb->alloced_read_pkts--;
+			return ret;
+		}
+
+		space -= len;
+		copied += len;
+
+		spin_lock_irqsave(&ksb->lock, flags);
+		if (pkt->n_read == pkt->len) {
+			list_del_init(&pkt->list);
+			ksb_free_data_pkt(pkt);
+			ksb->alloced_read_pkts--;
+		}
+	}
+	spin_unlock_irqrestore(&ksb->lock, flags);
+
+	dbg_log_event(ksb, "KS_READ", copied, 0);
+
+	pr_debug("count:%zu space:%zu copied:%zu", count, space, copied);
+
+	return copied;
+}
+
+static void ksb_tx_cb(struct urb *urb)
+{
+	struct data_pkt *pkt = urb->context;
+	struct ks_bridge *ksb = pkt->ctxt;
+
+	dbg_log_event(ksb, "C TX_URB", urb->status, 0);
+	pr_debug("status:%d", urb->status);
+
+	if (ksb->ifc)
+		usb_autopm_put_interface_async(ksb->ifc);
+
+	if (urb->status < 0)
+		pr_err_ratelimited("urb failed with err:%d", urb->status);
+
+	ksb_free_data_pkt(pkt);
+}
+
+static void ksb_tomdm_work(struct work_struct *w)
+{
+	struct ks_bridge *ksb = container_of(w, struct ks_bridge, to_mdm_work);
+	struct data_pkt	*pkt;
+	unsigned long flags;
+	struct urb *urb;
+	int ret;
+
+	spin_lock_irqsave(&ksb->lock, flags);
+	while (!list_empty(&ksb->to_mdm_list)
+			&& test_bit(USB_DEV_CONNECTED, &ksb->flags)) {
+		pkt = list_first_entry(&ksb->to_mdm_list,
+				struct data_pkt, list);
+		list_del_init(&pkt->list);
+		spin_unlock_irqrestore(&ksb->lock, flags);
+
+		urb = usb_alloc_urb(0, GFP_KERNEL);
+		if (!urb) {
+			pr_err_ratelimited("unable to allocate urb");
+			ksb_free_data_pkt(pkt);
+			return;
+		}
+
+		ret = usb_autopm_get_interface(ksb->ifc);
+		if (ret < 0 && ret != -EAGAIN && ret != -EACCES) {
+			pr_err_ratelimited("autopm_get failed:%d", ret);
+			usb_free_urb(urb);
+			ksb_free_data_pkt(pkt);
+			return;
+		}
+		usb_fill_bulk_urb(urb, ksb->udev, ksb->out_pipe,
+				pkt->buf, pkt->len, ksb_tx_cb, pkt);
+		usb_anchor_urb(urb, &ksb->submitted);
+
+		dbg_log_event(ksb, "S TX_URB", pkt->len, 0);
+
+		ret = usb_submit_urb(urb, GFP_KERNEL);
+		if (ret) {
+			pr_err("out urb submission failed");
+			usb_unanchor_urb(urb);
+			usb_free_urb(urb);
+			ksb_free_data_pkt(pkt);
+			usb_autopm_put_interface(ksb->ifc);
+			return;
+		}
+
+		spin_lock_irqsave(&ksb->lock, flags);
+	}
+	spin_unlock_irqrestore(&ksb->lock, flags);
+}
+
+static ssize_t ksb_fs_write(struct file *fp, const char __user *buf,
+				 size_t count, loff_t *pos)
+{
+	int			ret;
+	struct data_pkt		*pkt;
+	unsigned long		flags;
+	struct ks_bridge	*ksb = fp->private_data;
+
+	pkt = ksb_alloc_data_pkt(count, GFP_KERNEL, ksb);
+	if (IS_ERR(pkt)) {
+		pr_err("unable to allocate data packet");
+		return PTR_ERR(pkt);
+	}
+
+	ret = copy_from_user(pkt->buf, buf, count);
+	if (ret) {
+		pr_err("copy_from_user failed: err:%d", ret);
+		ksb_free_data_pkt(pkt);
+		return ret;
+	}
+
+	spin_lock_irqsave(&ksb->lock, flags);
+	list_add_tail(&pkt->list, &ksb->to_mdm_list);
+	spin_unlock_irqrestore(&ksb->lock, flags);
+
+	queue_work(ksb->wq, &ksb->to_mdm_work);
+
+	return count;
+}
+
+static int efs_fs_open(struct inode *ip, struct file *fp)
+{
+	struct ks_bridge *ksb = __ksb[EFS_BRIDGE_INDEX];
+
+	if (!ksb) {
+		pr_err("ksb is being removed");
+		return -ENODEV;
+	}
+
+	pr_debug(":%s", ksb->name);
+	dbg_log_event(ksb, "EFS-FS-OPEN", 0, 0);
+
+	fp->private_data = ksb;
+	set_bit(FILE_OPENED, &ksb->flags);
+
+	if (test_bit(USB_DEV_CONNECTED, &ksb->flags))
+		queue_work(ksb->wq, &ksb->start_rx_work);
+
+	return 0;
+}
+
+static int ksb_fs_open(struct inode *ip, struct file *fp)
+{
+	struct ks_bridge *ksb = __ksb[BOOT_BRIDGE_INDEX];
+
+	if (!ksb) {
+		pr_err("ksb is being removed");
+		return -ENODEV;
+	}
+
+	pr_debug(":%s", ksb->name);
+	dbg_log_event(ksb, "KS-FS-OPEN", 0, 0);
+
+	fp->private_data = ksb;
+	set_bit(FILE_OPENED, &ksb->flags);
+
+	if (test_bit(USB_DEV_CONNECTED, &ksb->flags))
+		queue_work(ksb->wq, &ksb->start_rx_work);
+
+	return 0;
+}
+
+static int ksb_fs_release(struct inode *ip, struct file *fp)
+{
+	struct ks_bridge	*ksb = fp->private_data;
+
+	pr_debug(":%s", ksb->name);
+	dbg_log_event(ksb, "FS-RELEASE", 0, 0);
+
+	clear_bit(FILE_OPENED, &ksb->flags);
+	fp->private_data = NULL;
+
+	return 0;
+}
+
+static const struct file_operations ksb_fops = {
+	.owner = THIS_MODULE,
+	.read = ksb_fs_read,
+	.write = ksb_fs_write,
+	.open = ksb_fs_open,
+	.release = ksb_fs_release,
+};
+
+static struct miscdevice ksb_fboot_dev = {
+	.minor = MISC_DYNAMIC_MINOR,
+	.name = "ks_bridge",
+	.fops = &ksb_fops,
+};
+
+static const struct file_operations efs_fops = {
+	.owner = THIS_MODULE,
+	.read = ksb_fs_read,
+	.write = ksb_fs_write,
+	.open = efs_fs_open,
+	.release = ksb_fs_release,
+};
+
+static struct miscdevice ksb_efs_dev = {
+	.minor = MISC_DYNAMIC_MINOR,
+	.name = "efs_bridge",
+	.fops = &efs_fops,
+};
+
+static const struct usb_device_id ksb_usb_ids[] = {
+	{ USB_DEVICE(0x5c6, 0x9008),
+	.driver_info = (unsigned long)&ksb_fboot_dev, },
+	{ USB_DEVICE(0x5c6, 0x9048),
+	.driver_info = (unsigned long)&ksb_efs_dev, },
+	{ USB_DEVICE(0x5c6, 0x904C),
+	.driver_info = (unsigned long)&ksb_efs_dev, },
+
+	{} /* terminating entry */
+};
+MODULE_DEVICE_TABLE(usb, ksb_usb_ids);
+
+static void ksb_rx_cb(struct urb *urb);
+static void submit_one_urb(struct ks_bridge *ksb)
+{
+	struct data_pkt	*pkt;
+	struct urb *urb;
+	int ret;
+
+	pkt = ksb_alloc_data_pkt(MAX_DATA_PKT_SIZE, GFP_ATOMIC, ksb);
+	if (IS_ERR(pkt)) {
+		pr_err("unable to allocate data pkt");
+		return;
+	}
+
+	urb = usb_alloc_urb(0, GFP_ATOMIC);
+	if (!urb) {
+		pr_err("unable to allocate urb");
+		ksb_free_data_pkt(pkt);
+		return;
+	}
+	ksb->alloced_read_pkts++;
+
+	usb_fill_bulk_urb(urb, ksb->udev, ksb->in_pipe,
+			pkt->buf, pkt->len,
+			ksb_rx_cb, pkt);
+	usb_anchor_urb(urb, &ksb->submitted);
+
+	dbg_log_event(ksb, "S RX_URB", pkt->len, 0);
+
+	ret = usb_submit_urb(urb, GFP_ATOMIC);
+	if (ret) {
+		pr_err("in urb submission failed");
+		usb_unanchor_urb(urb);
+		usb_free_urb(urb);
+		ksb_free_data_pkt(pkt);
+		ksb->alloced_read_pkts--;
+		return;
+	}
+
+	usb_free_urb(urb);
+}
+static void ksb_rx_cb(struct urb *urb)
+{
+	struct data_pkt *pkt = urb->context;
+	struct ks_bridge *ksb = pkt->ctxt;
+
+	dbg_log_event(ksb, "C RX_URB", urb->status, urb->actual_length);
+
+	pr_debug("status:%d actual:%d", urb->status, urb->actual_length);
+
+	if (urb->status < 0) {
+		if (urb->status != -ESHUTDOWN && urb->status != -ENOENT)
+			pr_err_ratelimited("urb failed with err:%d",
+					urb->status);
+		ksb_free_data_pkt(pkt);
+		ksb->alloced_read_pkts--;
+		return;
+	}
+
+	if (urb->actual_length == 0) {
+		ksb_free_data_pkt(pkt);
+		ksb->alloced_read_pkts--;
+		goto resubmit_urb;
+	}
+
+	spin_lock(&ksb->lock);
+	pkt->len = urb->actual_length;
+	list_add_tail(&pkt->list, &ksb->to_ks_list);
+	spin_unlock(&ksb->lock);
+
+	/* wake up read thread */
+	wake_up(&ksb->ks_wait_q);
+
+resubmit_urb:
+	submit_one_urb(ksb);
+
+}
+
+static void ksb_start_rx_work(struct work_struct *w)
+{
+	struct ks_bridge *ksb =
+			container_of(w, struct ks_bridge, start_rx_work);
+	struct data_pkt	*pkt;
+	struct urb *urb;
+	int i = 0;
+	int ret;
+
+	for (i = 0; i < NO_RX_REQS; i++) {
+		pkt = ksb_alloc_data_pkt(MAX_DATA_PKT_SIZE, GFP_KERNEL, ksb);
+		if (IS_ERR(pkt)) {
+			pr_err("unable to allocate data pkt");
+			return;
+		}
+
+		urb = usb_alloc_urb(0, GFP_KERNEL);
+		if (!urb) {
+			pr_err("unable to allocate urb");
+			ksb_free_data_pkt(pkt);
+			return;
+		}
+
+		ret = usb_autopm_get_interface(ksb->ifc);
+		if (ret < 0 && ret != -EAGAIN && ret != -EACCES) {
+			pr_err_ratelimited("autopm_get failed:%d", ret);
+			usb_free_urb(urb);
+			ksb_free_data_pkt(pkt);
+			return;
+		}
+		ksb->alloced_read_pkts++;
+
+		usb_fill_bulk_urb(urb, ksb->udev, ksb->in_pipe,
+				pkt->buf, pkt->len,
+				ksb_rx_cb, pkt);
+		usb_anchor_urb(urb, &ksb->submitted);
+
+		dbg_log_event(ksb, "S RX_URB", pkt->len, 0);
+
+		ret = usb_submit_urb(urb, GFP_KERNEL);
+		if (ret) {
+			pr_err("in urb submission failed");
+			usb_unanchor_urb(urb);
+			usb_free_urb(urb);
+			ksb_free_data_pkt(pkt);
+			ksb->alloced_read_pkts--;
+			usb_autopm_put_interface(ksb->ifc);
+			return;
+		}
+
+		usb_autopm_put_interface_async(ksb->ifc);
+		usb_free_urb(urb);
+	}
+}
+
+static int
+ksb_usb_probe(struct usb_interface *ifc, const struct usb_device_id *id)
+{
+	__u8				ifc_num;
+	struct usb_host_interface	*ifc_desc;
+	struct usb_endpoint_descriptor	*ep_desc;
+	int				i;
+	struct ks_bridge		*ksb;
+	struct miscdevice		*fs_dev;
+
+	ifc_num = ifc->cur_altsetting->desc.bInterfaceNumber;
+
+	switch (id->idProduct) {
+	case 0x9008:
+		if (ifc_num != 0)
+			return -ENODEV;
+		ksb = __ksb[BOOT_BRIDGE_INDEX];
+		break;
+	case 0x9048:
+	case 0x904C:
+		if (ifc_num != 2)
+			return -ENODEV;
+		ksb = __ksb[EFS_BRIDGE_INDEX];
+		break;
+	default:
+		return -ENODEV;
+	}
+
+	if (!ksb) {
+		pr_err("ksb is not initialized");
+		return -ENODEV;
+	}
+
+	ksb->udev = usb_get_dev(interface_to_usbdev(ifc));
+	ksb->ifc = ifc;
+	ifc_desc = ifc->cur_altsetting;
+
+	for (i = 0; i < ifc_desc->desc.bNumEndpoints; i++) {
+		ep_desc = &ifc_desc->endpoint[i].desc;
+
+		if (!ksb->in_epAddr && usb_endpoint_is_bulk_in(ep_desc))
+			ksb->in_epAddr = ep_desc->bEndpointAddress;
+
+		if (!ksb->out_epAddr && usb_endpoint_is_bulk_out(ep_desc))
+			ksb->out_epAddr = ep_desc->bEndpointAddress;
+	}
+
+	if (!(ksb->in_epAddr && ksb->out_epAddr)) {
+		pr_err("could not find bulk in and bulk out endpoints");
+		usb_put_dev(ksb->udev);
+		ksb->ifc = NULL;
+		return -ENODEV;
+	}
+
+	ksb->in_pipe = usb_rcvbulkpipe(ksb->udev, ksb->in_epAddr);
+	ksb->out_pipe = usb_sndbulkpipe(ksb->udev, ksb->out_epAddr);
+
+	usb_set_intfdata(ifc, ksb);
+	set_bit(USB_DEV_CONNECTED, &ksb->flags);
+
+	dbg_log_event(ksb, "PID-ATT", id->idProduct, 0);
+
+	fs_dev = (struct miscdevice *)id->driver_info;
+	misc_register(fs_dev);
+
+	usb_enable_autosuspend(ksb->udev);
+
+	pr_debug("usb dev connected");
+
+	return 0;
+}
+
+static int ksb_usb_suspend(struct usb_interface *ifc, pm_message_t message)
+{
+	struct ks_bridge *ksb = usb_get_intfdata(ifc);
+
+	dbg_log_event(ksb, "SUSPEND", 0, 0);
+
+	pr_info("read cnt: %d", ksb->alloced_read_pkts);
+
+	usb_kill_anchored_urbs(&ksb->submitted);
+
+	return 0;
+}
+
+static int ksb_usb_resume(struct usb_interface *ifc)
+{
+	struct ks_bridge *ksb = usb_get_intfdata(ifc);
+
+	dbg_log_event(ksb, "RESUME", 0, 0);
+
+	if (test_bit(FILE_OPENED, &ksb->flags))
+		queue_work(ksb->wq, &ksb->start_rx_work);
+
+	return 0;
+}
+
+static void ksb_usb_disconnect(struct usb_interface *ifc)
+{
+	struct ks_bridge *ksb = usb_get_intfdata(ifc);
+	unsigned long flags;
+	struct data_pkt *pkt;
+
+	dbg_log_event(ksb, "PID-DETACH", 0, 0);
+
+	clear_bit(USB_DEV_CONNECTED, &ksb->flags);
+	wake_up(&ksb->ks_wait_q);
+	cancel_work_sync(&ksb->to_mdm_work);
+
+	usb_kill_anchored_urbs(&ksb->submitted);
+
+	spin_lock_irqsave(&ksb->lock, flags);
+	while (!list_empty(&ksb->to_ks_list)) {
+		pkt = list_first_entry(&ksb->to_ks_list,
+				struct data_pkt, list);
+		list_del_init(&pkt->list);
+		ksb_free_data_pkt(pkt);
+	}
+	while (!list_empty(&ksb->to_mdm_list)) {
+		pkt = list_first_entry(&ksb->to_mdm_list,
+				struct data_pkt, list);
+		list_del_init(&pkt->list);
+		ksb_free_data_pkt(pkt);
+	}
+	spin_unlock_irqrestore(&ksb->lock, flags);
+
+	usb_put_dev(ksb->udev);
+	ksb->ifc = NULL;
+	usb_set_intfdata(ifc, NULL);
+
+	return;
+}
+
+static struct usb_driver ksb_usb_driver = {
+	.name =		"ks_bridge",
+	.probe =	ksb_usb_probe,
+	.disconnect =	ksb_usb_disconnect,
+	.suspend =	ksb_usb_suspend,
+	.resume =	ksb_usb_resume,
+	.id_table =	ksb_usb_ids,
+	.supports_autosuspend = 1,
+};
+
+static int ksb_debug_show(struct seq_file *s, void *unused)
+{
+	unsigned long		flags;
+	struct ks_bridge	*ksb = s->private;
+	int			i;
+
+	read_lock_irqsave(&ksb->dbg_lock, flags);
+	for (i = 0; i < DBG_MAX_MSG; i++) {
+		if (i == (ksb->dbg_idx - 1))
+			seq_printf(s, "-->%s\n", ksb->dbgbuf[i]);
+		else
+			seq_printf(s, "%s\n", ksb->dbgbuf[i]);
+	}
+	read_unlock_irqrestore(&ksb->dbg_lock, flags);
+
+	return 0;
+}
+
+static int ksb_debug_open(struct inode *ip, struct file *fp)
+{
+	return single_open(fp, ksb_debug_show, ip->i_private);
+}
+
+static const struct file_operations dbg_fops = {
+	.open = ksb_debug_open,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.release = single_release,
+};
+static struct dentry *dbg_dir;
+static int __init ksb_init(void)
+{
+	struct ks_bridge *ksb;
+	int num_instances = 0;
+	int ret = 0;
+	int i;
+
+	dbg_dir = debugfs_create_dir("ks_bridge", NULL);
+	if (IS_ERR(dbg_dir))
+		pr_err("unable to create debug dir");
+
+	for (i = 0; i < NO_BRIDGE_INSTANCES; i++) {
+		ksb = kzalloc(sizeof(struct ks_bridge), GFP_KERNEL);
+		if (!ksb) {
+			pr_err("unable to allocate mem for ks_bridge");
+			return -ENOMEM;
+		}
+		__ksb[i] = ksb;
+
+		ksb->name = kasprintf(GFP_KERNEL, "ks_bridge:%i", i + 1);
+		if (!ksb->name) {
+			pr_err("unable to allocate name");
+			kfree(ksb);
+			ret = -ENOMEM;
+			goto dev_free;
+		}
+
+		spin_lock_init(&ksb->lock);
+		INIT_LIST_HEAD(&ksb->to_mdm_list);
+		INIT_LIST_HEAD(&ksb->to_ks_list);
+		init_waitqueue_head(&ksb->ks_wait_q);
+		ksb->wq = create_singlethread_workqueue(ksb->name);
+		if (!ksb->wq) {
+			pr_err("unable to allocate workqueue");
+			kfree(ksb->name);
+			kfree(ksb);
+			ret = -ENOMEM;
+			goto dev_free;
+		}
+
+		INIT_WORK(&ksb->to_mdm_work, ksb_tomdm_work);
+		INIT_WORK(&ksb->start_rx_work, ksb_start_rx_work);
+		init_usb_anchor(&ksb->submitted);
+
+		ksb->dbg_idx = 0;
+		ksb->dbg_lock = __RW_LOCK_UNLOCKED(lck);
+
+		if (!IS_ERR(dbg_dir))
+			debugfs_create_file(ksb->name, S_IRUGO, dbg_dir,
+					ksb, &dbg_fops);
+
+		num_instances++;
+	}
+
+	ret = usb_register(&ksb_usb_driver);
+	if (ret) {
+		pr_err("unable to register ks bridge driver");
+		goto dev_free;
+	}
+
+	pr_info("init done");
+
+	return 0;
+
+dev_free:
+	if (!IS_ERR(dbg_dir))
+		debugfs_remove_recursive(dbg_dir);
+
+	for (i = 0; i < num_instances; i++) {
+		ksb = __ksb[i];
+
+		destroy_workqueue(ksb->wq);
+		kfree(ksb->name);
+		kfree(ksb);
+	}
+
+	return ret;
+
+}
+
+static void __exit ksb_exit(void)
+{
+	struct ks_bridge *ksb;
+	int i;
+
+	if (!IS_ERR(dbg_dir))
+		debugfs_remove_recursive(dbg_dir);
+
+	usb_deregister(&ksb_usb_driver);
+
+	for (i = 0; i < NO_BRIDGE_INSTANCES; i++) {
+		ksb = __ksb[i];
+
+		destroy_workqueue(ksb->wq);
+		kfree(ksb->name);
+		kfree(ksb);
+	}
+}
+
+module_init(ksb_init);
+module_exit(ksb_exit);
+
+MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_VERSION(DRIVER_VERSION);
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/video/msm/mdp.c b/drivers/video/msm/mdp.c
index 8e6f347..c397f84 100644
--- a/drivers/video/msm/mdp.c
+++ b/drivers/video/msm/mdp.c
@@ -1432,11 +1432,9 @@
 		outpdw(MDP_BASE + 0x0004, 0);
 	} else if (term == MDP_OVERLAY1_TERM) {
 		mdp_pipe_ctrl(MDP_OVERLAY1_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
-		mdp_lut_enable();
 		outpdw(MDP_BASE + 0x0008, 0);
 	} else if (term == MDP_OVERLAY2_TERM) {
 		mdp_pipe_ctrl(MDP_OVERLAY2_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
-		mdp_lut_enable();
 		outpdw(MDP_BASE + 0x00D0, 0);
 	}
 #else
diff --git a/drivers/video/msm/mdp4_overlay.c b/drivers/video/msm/mdp4_overlay.c
index 413b239..5879530 100644
--- a/drivers/video/msm/mdp4_overlay.c
+++ b/drivers/video/msm/mdp4_overlay.c
@@ -1817,7 +1817,13 @@
 		op_mode &= ~(MDP4_OP_FLIP_UD + MDP4_OP_SCALEY_EN);
 		outpdw(base + 0x0058, op_mode);
 		outpdw(base + 0x1008, 0);	/* black */
+		/*
+		 * Set src size equal to dst size to avoid underruns
+		 */
+		outpdw(base + 0x0000, inpdw(base + 0x0008));
 	} else {
+		u32 src_size = ((pipe->src_h << 16) | pipe->src_w);
+		outpdw(base + 0x0000, src_size);
 		format &= ~MDP4_FORMAT_SOLID_FILL;
 		blend->solidfill_pipe = NULL;
 	}
@@ -2922,30 +2928,32 @@
 
 int mdp4_overlay_wait4vsync(struct fb_info *info, long long *vtime)
 {
-	if (info->node == 0) {
+	if (!hdmi_prim_display && info->node == 0) {
 		if (ctrl->panel_mode & MDP4_PANEL_DSI_VIDEO)
 			mdp4_dsi_video_wait4vsync(0, vtime);
 		else if (ctrl->panel_mode & MDP4_PANEL_DSI_CMD)
 			mdp4_dsi_cmd_wait4vsync(0, vtime);
 		else if (ctrl->panel_mode & MDP4_PANEL_LCDC)
 			mdp4_lcdc_wait4vsync(0, vtime);
-	} else if (info->node == 1)
+	} else if (hdmi_prim_display || info->node == 1) {
 		mdp4_dtv_wait4vsync(0, vtime);
+	}
 
 	return 0;
 }
 
 int mdp4_overlay_vsync_ctrl(struct fb_info *info, int enable)
 {
-	if (info->node == 0) {
+	if (!hdmi_prim_display && info->node == 0) {
 		if (ctrl->panel_mode & MDP4_PANEL_DSI_VIDEO)
 			mdp4_dsi_video_vsync_ctrl(0, enable);
 		else if (ctrl->panel_mode & MDP4_PANEL_DSI_CMD)
 			mdp4_dsi_cmd_vsync_ctrl(0, enable);
 		else if (ctrl->panel_mode & MDP4_PANEL_LCDC)
 			mdp4_lcdc_vsync_ctrl(0, enable);
-	} else if (info->node == 1)
+	} else if (hdmi_prim_display || info->node == 1) {
 		mdp4_dtv_vsync_ctrl(0, enable);
+	}
 
 	return 0;
 }
diff --git a/drivers/video/msm/mdp4_overlay_dsi_video.c b/drivers/video/msm/mdp4_overlay_dsi_video.c
index 398b1e6..6445ec1 100644
--- a/drivers/video/msm/mdp4_overlay_dsi_video.c
+++ b/drivers/video/msm/mdp4_overlay_dsi_video.c
@@ -455,6 +455,7 @@
 
 	vctrl->mfd = mfd;
 	vctrl->dev = mfd->fbi->dev;
+	vctrl->fake_vsync = 1;
 
 	/* mdp clock on */
 	mdp_clk_ctrl(1);
@@ -513,6 +514,8 @@
 	pipe->src_w = fbi->var.xres;
 	pipe->src_y = 0;
 	pipe->src_x = 0;
+	pipe->dst_h = fbi->var.yres;
+	pipe->dst_w = fbi->var.xres;
 	pipe->srcp0_ystride = fbi->fix.line_length;
 	pipe->bpp = bpp;
 
diff --git a/drivers/video/msm/mdp4_overlay_dtv.c b/drivers/video/msm/mdp4_overlay_dtv.c
index 57a07d0..f3d9e2c 100644
--- a/drivers/video/msm/mdp4_overlay_dtv.c
+++ b/drivers/video/msm/mdp4_overlay_dtv.c
@@ -73,6 +73,7 @@
 	int dmae_wait_cnt;
 	int wait_vsync_cnt;
 	int blt_change;
+	int fake_vsync;
 	struct mutex update_lock;
 	struct completion dmae_comp;
 	struct completion vsync_comp;
@@ -236,6 +237,11 @@
 
 	vctrl = &vsync_ctrl_db[cndx];
 
+	if (vctrl->fake_vsync) {
+		vctrl->fake_vsync = 0;
+		schedule_work(&vctrl->vsync_work);
+	}
+
 	if (vctrl->vsync_irq_enabled == enable)
 		return;
 
@@ -521,6 +527,7 @@
 		return -EINVAL;
 
 	vctrl->dev = mfd->fbi->dev;
+	vctrl->fake_vsync = 1;
 
 	mdp_footswitch_ctrl(TRUE);
 	/* Mdp clock enable */
@@ -587,6 +594,7 @@
 
 	ret = panel_next_off(pdev);
 	mdp_footswitch_ctrl(FALSE);
+	vctrl->fake_vsync = 1;
 
 	/* Mdp clock disable */
 	mdp_clk_ctrl(0);
@@ -700,6 +708,8 @@
 	pipe->src_w = fbi->var.xres;
 	pipe->src_y = 0;
 	pipe->src_x = 0;
+	pipe->dst_h = fbi->var.yres;
+	pipe->dst_w = fbi->var.xres;
 	pipe->srcp0_ystride = fbi->fix.line_length;
 
 	ret = mdp4_overlay_format2pipe(pipe);
diff --git a/drivers/video/msm/mdp4_overlay_lcdc.c b/drivers/video/msm/mdp4_overlay_lcdc.c
index 2da2052..79bb7c5 100644
--- a/drivers/video/msm/mdp4_overlay_lcdc.c
+++ b/drivers/video/msm/mdp4_overlay_lcdc.c
@@ -460,6 +460,7 @@
 
 	vctrl->mfd = mfd;
 	vctrl->dev = mfd->fbi->dev;
+	vctrl->fake_vsync = 1;
 
 	/* mdp clock on */
 	mdp_clk_ctrl(1);
@@ -503,6 +504,8 @@
 	pipe->src_w = fbi->var.xres;
 	pipe->src_y = 0;
 	pipe->src_x = 0;
+	pipe->dst_h = fbi->var.yres;
+	pipe->dst_w = fbi->var.xres;
 
 	if (mfd->display_iova)
 		pipe->srcp0_addr = mfd->display_iova + buf_offset;
diff --git a/drivers/video/msm/mdss/Makefile b/drivers/video/msm/mdss/Makefile
index 492437e..b6294f4 100644
--- a/drivers/video/msm/mdss/Makefile
+++ b/drivers/video/msm/mdss/Makefile
@@ -7,4 +7,10 @@
 mdss-mdp-objs += mdss_mdp_wb.o
 obj-$(CONFIG_FB_MSM_MDSS) += mdss-mdp.o
 obj-$(CONFIG_FB_MSM_MDSS) += mdss_fb.o
+
+mdss-dsi-objs := mdss_dsi.o mdss_dsi_host.o
+mdss-dsi-objs += mdss_dsi_panel.o
+mdss-dsi-objs += msm_mdss_io_8974.o
+obj-$(CONFIG_FB_MSM_MDSS) += mdss-dsi.o
+
 obj-$(CONFIG_FB_MSM_MDSS_WRITEBACK) += mdss_wb.o
diff --git a/drivers/video/msm/mdss/mdss.h b/drivers/video/msm/mdss/mdss.h
index a58c3e6..6145d67 100644
--- a/drivers/video/msm/mdss/mdss.h
+++ b/drivers/video/msm/mdss/mdss.h
@@ -22,6 +22,7 @@
 #define MDSS_REG_READ(addr) readl_relaxed(mdss_reg_base + addr)
 
 extern unsigned char *mdss_reg_base;
+extern spinlock_t dsi_clk_lock;
 
 enum mdss_mdp_clk_type {
 	MDSS_CLK_AHB,
diff --git a/drivers/video/msm/mdss/mdss_dsi.c b/drivers/video/msm/mdss/mdss_dsi.c
new file mode 100644
index 0000000..d051828
--- /dev/null
+++ b/drivers/video/msm/mdss/mdss_dsi.c
@@ -0,0 +1,383 @@
+/* Copyright (c) 2012, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/spinlock.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/of_device.h>
+
+#include "mdss.h"
+#include "mdss_panel.h"
+#include "mdss_dsi.h"
+
+static struct mdss_panel_common_pdata *panel_pdata;
+
+static unsigned char *mdss_dsi_base;
+
+static int mdss_dsi_off(struct mdss_panel_data *pdata)
+{
+	int ret = 0;
+	struct mdss_panel_info *pinfo;
+
+	pinfo = &pdata->panel_info;
+
+	if (pdata->panel_info.type == MIPI_VIDEO_PANEL)
+		mdss_dsi_controller_cfg(0, pdata);
+
+	mdss_dsi_op_mode_config(DSI_CMD_MODE, pdata);
+
+	ret = panel_pdata->off(pdata);
+	if (ret) {
+		pr_err("%s: Panel OFF failed\n", __func__);
+		return ret;
+	}
+
+	spin_lock_bh(&dsi_clk_lock);
+	mdss_dsi_clk_disable();
+
+	/* disable dsi engine */
+	MIPI_OUTP(mdss_dsi_base + 0x0004, 0);
+
+	spin_unlock_bh(&dsi_clk_lock);
+
+	mdss_dsi_unprepare_clocks();
+
+	pr_debug("%s-:\n", __func__);
+
+	return ret;
+}
+
+static int mdss_dsi_on(struct mdss_panel_data *pdata)
+{
+	int ret = 0;
+	u32 clk_rate;
+	struct mdss_panel_info *pinfo;
+	struct mipi_panel_info *mipi;
+	u32 hbp, hfp, vbp, vfp, hspw, vspw, width, height;
+	u32 ystride, bpp, data;
+	u32 dummy_xres, dummy_yres;
+
+	pinfo = &pdata->panel_info;
+
+	cont_splash_clk_ctrl(0);
+	mdss_dsi_prepare_clocks();
+
+	spin_lock_bh(&dsi_clk_lock);
+
+	MIPI_OUTP(mdss_dsi_base + 0x118, 1);
+	MIPI_OUTP(mdss_dsi_base + 0x118, 0);
+
+	mdss_dsi_clk_enable();
+	spin_unlock_bh(&dsi_clk_lock);
+
+	clk_rate = pdata->panel_info.clk_rate;
+	clk_rate = min(clk_rate, pdata->panel_info.clk_max);
+
+	hbp = pdata->panel_info.lcdc.h_back_porch;
+	hfp = pdata->panel_info.lcdc.h_front_porch;
+	vbp = pdata->panel_info.lcdc.v_back_porch;
+	vfp = pdata->panel_info.lcdc.v_front_porch;
+	hspw = pdata->panel_info.lcdc.h_pulse_width;
+	vspw = pdata->panel_info.lcdc.v_pulse_width;
+	width = pdata->panel_info.xres;
+	height = pdata->panel_info.yres;
+
+	mipi  = &pdata->panel_info.mipi;
+	if (pdata->panel_info.type == MIPI_VIDEO_PANEL) {
+		dummy_xres = pdata->panel_info.lcdc.xres_pad;
+		dummy_yres = pdata->panel_info.lcdc.yres_pad;
+
+		MIPI_OUTP(mdss_dsi_base + 0x24,
+			((hspw + hbp + width + dummy_xres) << 16 |
+			(hspw + hbp)));
+		MIPI_OUTP(mdss_dsi_base + 0x28,
+			((vspw + vbp + height + dummy_yres) << 16 |
+			(vspw + vbp)));
+		MIPI_OUTP(mdss_dsi_base + 0x2C,
+			(vspw + vbp + height + dummy_yres +
+				vfp - 1) << 16 | (hspw + hbp +
+				width + dummy_xres + hfp - 1));
+
+		MIPI_OUTP(mdss_dsi_base + 0x30, (hspw << 16));
+		MIPI_OUTP(mdss_dsi_base + 0x34, 0);
+		MIPI_OUTP(mdss_dsi_base + 0x38, (vspw << 16));
+
+	} else {		/* command mode */
+		if (mipi->dst_format == DSI_CMD_DST_FORMAT_RGB888)
+			bpp = 3;
+		else if (mipi->dst_format == DSI_CMD_DST_FORMAT_RGB666)
+			bpp = 3;
+		else if (mipi->dst_format == DSI_CMD_DST_FORMAT_RGB565)
+			bpp = 2;
+		else
+			bpp = 3;	/* Default format set to RGB888 */
+
+		ystride = width * bpp + 1;
+
+		/* DSI_COMMAND_MODE_MDP_STREAM_CTRL */
+		data = (ystride << 16) | (mipi->vc << 8) | DTYPE_DCS_LWRITE;
+		MIPI_OUTP(mdss_dsi_base + 0x60, data);
+		MIPI_OUTP(mdss_dsi_base + 0x58, data);
+
+		/* DSI_COMMAND_MODE_MDP_STREAM_TOTAL */
+		data = height << 16 | width;
+		MIPI_OUTP(mdss_dsi_base + 0x64, data);
+		MIPI_OUTP(mdss_dsi_base + 0x5C, data);
+	}
+
+	mdss_dsi_host_init(mipi, pdata);
+
+	if (mipi->force_clk_lane_hs) {
+		u32 tmp;
+
+		tmp = MIPI_INP(mdss_dsi_base + 0xac);
+		tmp |= (1<<28);
+		MIPI_OUTP(mdss_dsi_base + 0xac, tmp);
+		wmb();
+	}
+
+	ret = panel_pdata->on(pdata);
+	if (ret) {
+		pr_err("%s: unable to initialize the panel\n", __func__);
+		return ret;
+	}
+
+	mdss_dsi_op_mode_config(mipi->mode, pdata);
+
+	pr_debug("%s-:\n", __func__);
+	return ret;
+}
+
+unsigned char *mdss_dsi_get_base_adr(void)
+{
+	return mdss_dsi_base;
+}
+
+unsigned char *mdss_dsi_get_clk_base(void)
+{
+	return mdss_dsi_base;
+}
+
+static int mdss_dsi_resource_initialized;
+
+static int __devinit mdss_dsi_probe(struct platform_device *pdev)
+{
+	int rc = 0;
+	pr_debug("%s\n", __func__);
+
+	if (pdev->dev.of_node && !mdss_dsi_resource_initialized) {
+		struct resource *mdss_dsi_mres;
+		pdev->id = 1;
+		mdss_dsi_mres = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+		if (!mdss_dsi_mres) {
+			pr_err("%s:%d unable to get the MDSS resources",
+				       __func__, __LINE__);
+			return -ENOMEM;
+		}
+		if (mdss_dsi_mres) {
+			mdss_dsi_base = ioremap(mdss_dsi_mres->start,
+				resource_size(mdss_dsi_mres));
+			if (!mdss_dsi_base) {
+				pr_err("%s:%d unable to remap dsi resources",
+					       __func__, __LINE__);
+				return -ENOMEM;
+			}
+		}
+
+		if (mdss_dsi_clk_init(pdev)) {
+			iounmap(mdss_dsi_base);
+			return -EPERM;
+		}
+
+		rc = of_platform_populate(pdev->dev.of_node,
+					NULL, NULL, &pdev->dev);
+		if (rc) {
+			dev_err(&pdev->dev,
+				"%s: failed to add child nodes, rc=%d\n",
+							__func__, rc);
+			iounmap(mdss_dsi_base);
+			return rc;
+		}
+
+		mdss_dsi_resource_initialized = 1;
+	}
+
+	if (!mdss_dsi_resource_initialized)
+		return -EPERM;
+
+	return 0;
+}
+
+static int __devexit mdss_dsi_remove(struct platform_device *pdev)
+{
+	struct msm_fb_data_type *mfd;
+
+	mfd = platform_get_drvdata(pdev);
+	iounmap(mdss_dsi_base);
+	return 0;
+}
+
+struct device dsi_dev;
+
+int dsi_panel_device_register(struct platform_device *pdev,
+			      struct mdss_panel_common_pdata *panel_data)
+{
+	struct mipi_panel_info *mipi;
+	int rc;
+	u8 lanes = 0, bpp;
+	u32 h_period, v_period, dsi_pclk_rate;
+	struct mdss_panel_data *pdata = NULL;
+
+	panel_pdata = panel_data;
+
+	h_period = ((panel_pdata->panel_info.lcdc.h_pulse_width)
+			+ (panel_pdata->panel_info.lcdc.h_back_porch)
+			+ (panel_pdata->panel_info.xres)
+			+ (panel_pdata->panel_info.lcdc.h_front_porch));
+
+	v_period = ((panel_pdata->panel_info.lcdc.v_pulse_width)
+			+ (panel_pdata->panel_info.lcdc.v_back_porch)
+			+ (panel_pdata->panel_info.yres)
+			+ (panel_pdata->panel_info.lcdc.v_front_porch));
+
+	mipi  = &panel_pdata->panel_info.mipi;
+
+	panel_pdata->panel_info.type =
+		((mipi->mode == DSI_VIDEO_MODE)
+			? MIPI_VIDEO_PANEL : MIPI_CMD_PANEL);
+
+	if (mipi->data_lane3)
+		lanes += 1;
+	if (mipi->data_lane2)
+		lanes += 1;
+	if (mipi->data_lane1)
+		lanes += 1;
+	if (mipi->data_lane0)
+		lanes += 1;
+
+
+	if ((mipi->dst_format == DSI_CMD_DST_FORMAT_RGB888)
+	    || (mipi->dst_format == DSI_VIDEO_DST_FORMAT_RGB888)
+	    || (mipi->dst_format == DSI_VIDEO_DST_FORMAT_RGB666_LOOSE))
+		bpp = 3;
+	else if ((mipi->dst_format == DSI_CMD_DST_FORMAT_RGB565)
+		 || (mipi->dst_format == DSI_VIDEO_DST_FORMAT_RGB565))
+		bpp = 2;
+	else
+		bpp = 3;		/* Default format set to RGB888 */
+
+	if (panel_pdata->panel_info.type == MIPI_VIDEO_PANEL &&
+		!panel_pdata->panel_info.clk_rate) {
+		h_period += panel_pdata->panel_info.lcdc.xres_pad;
+		v_period += panel_pdata->panel_info.lcdc.yres_pad;
+
+		if (lanes > 0) {
+			panel_pdata->panel_info.clk_rate =
+			((h_period * v_period * (mipi->frame_rate) * bpp * 8)
+			   / lanes);
+		} else {
+			pr_err("%s: forcing mdss_dsi lanes to 1\n", __func__);
+			panel_pdata->panel_info.clk_rate =
+				(h_period * v_period
+					 * (mipi->frame_rate) * bpp * 8);
+		}
+	}
+	pll_divider_config.clk_rate = panel_pdata->panel_info.clk_rate;
+
+	rc = mdss_dsi_clk_div_config(bpp, lanes, &dsi_pclk_rate);
+	if (rc) {
+		pr_err("%s: unable to initialize the clk dividers\n", __func__);
+		return rc;
+	}
+
+	if ((dsi_pclk_rate < 3300000) || (dsi_pclk_rate > 103300000))
+		dsi_pclk_rate = 35000000;
+	mipi->dsi_pclk_rate = dsi_pclk_rate;
+
+	/*
+	 * data chain
+	 */
+	pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
+	if (!pdata)
+		return -ENOMEM;
+
+	pdata->on = mdss_dsi_on;
+	pdata->off = mdss_dsi_off;
+	memcpy(&(pdata->panel_info), &(panel_pdata->panel_info),
+	       sizeof(struct mdss_panel_info));
+
+	pdata->dsi_base = mdss_dsi_base;
+
+	/*
+	 * register in mdp driver
+	 */
+	rc = mdss_register_panel(pdata);
+	if (rc) {
+		dev_err(&pdev->dev, "unable to register MIPI DSI panel\n");
+		devm_kfree(&pdev->dev, pdata);
+		return rc;
+	}
+
+	pr_debug("%s: Panel data initialized\n", __func__);
+	return 0;
+}
+
+static const struct of_device_id msm_mdss_dsi_dt_match[] = {
+	{.compatible = "qcom,msm-mdss-dsi"},
+	{}
+};
+MODULE_DEVICE_TABLE(of, msm_mdss_dsi_dt_match);
+
+static struct platform_driver mdss_dsi_driver = {
+	.probe = mdss_dsi_probe,
+	.remove = __devexit_p(mdss_dsi_remove),
+	.shutdown = NULL,
+	.driver = {
+		.name = "mdss_dsi",
+		.of_match_table = msm_mdss_dsi_dt_match,
+	},
+};
+
+static int mdss_dsi_register_driver(void)
+{
+	return platform_driver_register(&mdss_dsi_driver);
+}
+
+static int __init mdss_dsi_driver_init(void)
+{
+	int ret;
+
+	mdss_dsi_init();
+
+	ret = mdss_dsi_register_driver();
+	if (ret) {
+		pr_err("mdss_dsi_register_driver() failed!\n");
+		return ret;
+	}
+
+	return ret;
+}
+module_init(mdss_dsi_driver_init);
+
+static void __exit mdss_dsi_driver_cleanup(void)
+{
+	iounmap(mdss_dsi_base);
+	platform_driver_unregister(&mdss_dsi_driver);
+}
+module_exit(mdss_dsi_driver_cleanup);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("DSI controller driver");
+MODULE_AUTHOR("Chandan Uddaraju <chandanu@codeaurora.org>");
diff --git a/drivers/video/msm/mdss/mdss_dsi.h b/drivers/video/msm/mdss/mdss_dsi.h
new file mode 100644
index 0000000..57fce1a
--- /dev/null
+++ b/drivers/video/msm/mdss/mdss_dsi.h
@@ -0,0 +1,294 @@
+/* Copyright (c) 2012, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef MDSS_DSI_H
+#define MDSS_DSI_H
+
+#include <linux/list.h>
+#include <mach/scm-io.h>
+
+#include "mdss_panel.h"
+
+#define MMSS_MDSS_CC_BASE_PHY 0xFD8C2300	/* mmss clock control */
+#define MMSS_SERDES_BASE_PHY 0x04f01000 /* mmss (De)Serializer CFG */
+
+#define MIPI_OUTP(addr, data) writel_relaxed((data), (addr))
+#define MIPI_INP(addr) readl_relaxed(addr)
+
+#ifdef CONFIG_MSM_SECURE_IO
+#define MIPI_OUTP_SECURE(addr, data) secure_writel((data), (addr))
+#define MIPI_INP_SECURE(addr) secure_readl(addr)
+#else
+#define MIPI_OUTP_SECURE(addr, data) writel_relaxed((data), (addr))
+#define MIPI_INP_SECURE(addr) readl_relaxed(addr)
+#endif
+
+#define MIPI_DSI_PRIM 1
+#define MIPI_DSI_SECD 2
+
+#define MIPI_DSI_PANEL_VGA	0
+#define MIPI_DSI_PANEL_WVGA	1
+#define MIPI_DSI_PANEL_WVGA_PT	2
+#define MIPI_DSI_PANEL_FWVGA_PT	3
+#define MIPI_DSI_PANEL_WSVGA_PT	4
+#define MIPI_DSI_PANEL_QHD_PT 5
+#define MIPI_DSI_PANEL_WXGA	6
+#define MIPI_DSI_PANEL_WUXGA	7
+#define MIPI_DSI_PANEL_720P_PT	8
+#define DSI_PANEL_MAX	8
+
+enum {		/* mipi dsi panel */
+	DSI_VIDEO_MODE,
+	DSI_CMD_MODE,
+};
+
+enum {
+	ST_DSI_CLK_OFF,
+	ST_DSI_SUSPEND,
+	ST_DSI_RESUME,
+	ST_DSI_PLAYING,
+	ST_DSI_NUM
+};
+
+enum {
+	EV_DSI_UPDATE,
+	EV_DSI_DONE,
+	EV_DSI_TOUT,
+	EV_DSI_NUM
+};
+
+enum {
+	LANDSCAPE = 1,
+	PORTRAIT = 2,
+};
+
+enum dsi_trigger_type {
+	DSI_CMD_MODE_DMA,
+	DSI_CMD_MODE_MDP,
+};
+
+#define DSI_NON_BURST_SYNCH_PULSE	0
+#define DSI_NON_BURST_SYNCH_EVENT	1
+#define DSI_BURST_MODE			2
+
+#define DSI_RGB_SWAP_RGB	0
+#define DSI_RGB_SWAP_RBG	1
+#define DSI_RGB_SWAP_BGR	2
+#define DSI_RGB_SWAP_BRG	3
+#define DSI_RGB_SWAP_GRB	4
+#define DSI_RGB_SWAP_GBR	5
+
+#define DSI_VIDEO_DST_FORMAT_RGB565		0
+#define DSI_VIDEO_DST_FORMAT_RGB666		1
+#define DSI_VIDEO_DST_FORMAT_RGB666_LOOSE	2
+#define DSI_VIDEO_DST_FORMAT_RGB888		3
+
+#define DSI_CMD_DST_FORMAT_RGB111	0
+#define DSI_CMD_DST_FORMAT_RGB332	3
+#define DSI_CMD_DST_FORMAT_RGB444	4
+#define DSI_CMD_DST_FORMAT_RGB565	6
+#define DSI_CMD_DST_FORMAT_RGB666	7
+#define DSI_CMD_DST_FORMAT_RGB888	8
+
+#define DSI_INTR_ERROR_MASK		BIT(25)
+#define DSI_INTR_ERROR			BIT(24)
+#define DSI_INTR_VIDEO_DONE_MASK	BIT(17)
+#define DSI_INTR_VIDEO_DONE		BIT(16)
+#define DSI_INTR_CMD_MDP_DONE_MASK	BIT(9)
+#define DSI_INTR_CMD_MDP_DONE		BIT(8)
+#define DSI_INTR_CMD_DMA_DONE_MASK	BIT(1)
+#define DSI_INTR_CMD_DMA_DONE		BIT(0)
+
+#define DSI_CMD_TRIGGER_NONE		0x0	/* mdp trigger */
+#define DSI_CMD_TRIGGER_TE		0x02
+#define DSI_CMD_TRIGGER_SW		0x04
+#define DSI_CMD_TRIGGER_SW_SEOF		0x05	/* cmd dma only */
+#define DSI_CMD_TRIGGER_SW_TE		0x06
+
+extern struct device dsi_dev;
+extern int mdss_dsi_clk_on;
+extern u32 dsi_irq;
+
+struct dsiphy_pll_divider_config {
+	u32 clk_rate;
+	u32 fb_divider;
+	u32 ref_divider_ratio;
+	u32 bit_clk_divider;	/* oCLK1 */
+	u32 byte_clk_divider;	/* oCLK2 */
+	u32 analog_posDiv;
+	u32 digital_posDiv;
+};
+
+extern struct dsiphy_pll_divider_config pll_divider_config;
+
+struct dsi_clk_mnd_table {
+	u8 lanes;
+	u8 bpp;
+	u8 pll_digital_posDiv;
+	u8 pclk_m;
+	u8 pclk_n;
+	u8 pclk_d;
+};
+
+static const struct dsi_clk_mnd_table mnd_table[] = {
+	{ 1, 2,  8, 1, 1, 0},
+	{ 1, 3, 12, 1, 1, 0},
+	{ 2, 2,  4, 1, 1, 0},
+	{ 2, 3,  6, 1, 1, 0},
+	{ 3, 2,  1, 3, 8, 4},
+	{ 3, 3,  4, 1, 1, 0},
+	{ 4, 2,  2, 1, 1, 0},
+	{ 4, 3,  3, 1, 1, 0},
+};
+
+struct dsi_clk_desc {
+	u32 src;
+	u32 m;
+	u32 n;
+	u32 d;
+	u32 mnd_mode;
+	u32 pre_div_func;
+};
+
+#define DSI_HOST_HDR_SIZE	4
+#define DSI_HDR_LAST		BIT(31)
+#define DSI_HDR_LONG_PKT	BIT(30)
+#define DSI_HDR_BTA		BIT(29)
+#define DSI_HDR_VC(vc)		(((vc) & 0x03) << 22)
+#define DSI_HDR_DTYPE(dtype)	(((dtype) & 0x03f) << 16)
+#define DSI_HDR_DATA2(data)	(((data) & 0x0ff) << 8)
+#define DSI_HDR_DATA1(data)	((data) & 0x0ff)
+#define DSI_HDR_WC(wc)		((wc) & 0x0ffff)
+
+#define DSI_BUF_SIZE	1024
+#define MDSS_DSI_MRPS	0x04  /* Maximum Return Packet Size */
+
+#define MDSS_DSI_LEN 8 /* 4 x 4 - 6 - 2: 16 byte rdbk fifo minus dcs header, crc and alignment */
+
+struct dsi_buf {
+	u32 *hdr;	/* dsi host header */
+	char *start;	/* buffer start addr */
+	char *end;	/* buffer end addr */
+	int size;	/* size of buffer */
+	char *data;	/* buffer */
+	int len;	/* data length */
+	dma_addr_t dmap; /* mapped dma addr */
+};
+
+/* dcs read/write */
+#define DTYPE_DCS_WRITE		0x05	/* short write, 0 parameter */
+#define DTYPE_DCS_WRITE1	0x15	/* short write, 1 parameter */
+#define DTYPE_DCS_READ		0x06	/* read */
+#define DTYPE_DCS_LWRITE	0x39	/* long write */
+
+/* generic read/write */
+#define DTYPE_GEN_WRITE		0x03	/* short write, 0 parameter */
+#define DTYPE_GEN_WRITE1	0x13	/* short write, 1 parameter */
+#define DTYPE_GEN_WRITE2	0x23	/* short write, 2 parameter */
+#define DTYPE_GEN_LWRITE	0x29	/* long write */
+#define DTYPE_GEN_READ		0x04	/* long read, 0 parameter */
+#define DTYPE_GEN_READ1		0x14	/* long read, 1 parameter */
+#define DTYPE_GEN_READ2		0x24	/* long read, 2 parameter */
+
+#define DTYPE_TEAR_ON		0x35	/* set tear on */
+#define DTYPE_MAX_PKTSIZE	0x37	/* set max packet size */
+#define DTYPE_NULL_PKT		0x09	/* null packet, no data */
+#define DTYPE_BLANK_PKT		0x19	/* blanking packet, no data */
+
+#define DTYPE_CM_ON		0x02	/* color mode off */
+#define DTYPE_CM_OFF		0x12	/* color mode on */
+#define DTYPE_PERIPHERAL_OFF	0x22
+#define DTYPE_PERIPHERAL_ON	0x32
+
+/*
+ * dcs response
+ */
+#define DTYPE_ACK_ERR_RESP      0x02
+#define DTYPE_EOT_RESP          0x08    /* end of tx */
+#define DTYPE_GEN_READ1_RESP    0x11    /* 1 parameter, short */
+#define DTYPE_GEN_READ2_RESP    0x12    /* 2 parameter, short */
+#define DTYPE_GEN_LREAD_RESP    0x1a
+#define DTYPE_DCS_LREAD_RESP    0x1c
+#define DTYPE_DCS_READ1_RESP    0x21    /* 1 parameter, short */
+#define DTYPE_DCS_READ2_RESP    0x22    /* 2 parameter, short */
+
+struct dsi_cmd_desc {
+	int dtype;
+	int last;
+	int vc;
+	int ack;	/* ask ACK from peripheral */
+	int wait;
+	int dlen;
+	char *payload;
+};
+
+struct dsi_kickoff_action {
+	struct list_head act_entry;
+	void (*action) (void *);
+	void *data;
+};
+
+struct mdss_panel_common_pdata {
+	struct mdss_panel_info panel_info;
+	int (*on) (struct mdss_panel_data *pdata);
+	int (*off) (struct mdss_panel_data *pdata);
+};
+
+int dsi_panel_device_register(struct platform_device *pdev,
+			      struct mdss_panel_common_pdata *panel_data);
+
+char *mdss_dsi_buf_reserve_hdr(struct dsi_buf *dp, int hlen);
+char *mdss_dsi_buf_init(struct dsi_buf *dp);
+void mdss_dsi_init(void);
+int mdss_dsi_buf_alloc(struct dsi_buf *, int size);
+int mdss_dsi_cmd_dma_add(struct dsi_buf *dp, struct dsi_cmd_desc *cm);
+int mdss_dsi_cmds_tx(struct mdss_panel_data *pdata,
+		struct dsi_buf *dp, struct dsi_cmd_desc *cmds, int cnt);
+
+int mdss_dsi_cmd_dma_tx(struct dsi_buf *dp,
+				struct mdss_panel_data *pdata);
+int mdss_dsi_cmd_reg_tx(u32 data,
+				struct mdss_panel_data *pdata);
+int mdss_dsi_cmds_rx(struct mdss_panel_data *pdata,
+			struct dsi_buf *tp, struct dsi_buf *rp,
+			struct dsi_cmd_desc *cmds, int len);
+int mdss_dsi_cmd_dma_rx(struct dsi_buf *tp, int rlen,
+				struct mdss_panel_data *pdata);
+void mdss_dsi_host_init(struct mipi_panel_info *pinfo,
+				struct mdss_panel_data *pdata);
+void mdss_dsi_op_mode_config(int mode,
+				struct mdss_panel_data *pdata);
+void mdss_dsi_cmd_mode_ctrl(int enable);
+void mdp4_dsi_cmd_trigger(void);
+void mdss_dsi_cmd_mdp_start(void);
+void mdss_dsi_cmd_bta_sw_trigger(struct mdss_panel_data *pdata);
+void mdss_dsi_ack_err_status(unsigned char *dsi_base);
+void mdss_dsi_clk_enable(void);
+void mdss_dsi_clk_disable(void);
+void mdss_dsi_controller_cfg(int enable,
+				struct mdss_panel_data *pdata);
+void mdss_dsi_sw_reset(struct mdss_panel_data *pdata);
+
+irqreturn_t mdss_dsi_isr(int irq, void *ptr);
+
+void mipi_set_tx_power_mode(int mode, struct mdss_panel_data *pdata);
+int mdss_dsi_clk_div_config(u8 bpp, u8 lanes,
+			    u32 *expected_dsi_pclk);
+int mdss_dsi_clk_init(struct platform_device *pdev);
+void mdss_dsi_clk_deinit(struct device *dev);
+void mdss_dsi_prepare_clocks(void);
+void mdss_dsi_unprepare_clocks(void);
+void cont_splash_clk_ctrl(int enable);
+unsigned char *mdss_dsi_get_base_adr(void);
+
+#endif /* MDSS_DSI_H */
diff --git a/drivers/video/msm/mdss/mdss_dsi_host.c b/drivers/video/msm/mdss/mdss_dsi_host.c
new file mode 100644
index 0000000..7bc0105
--- /dev/null
+++ b/drivers/video/msm/mdss/mdss_dsi_host.c
@@ -0,0 +1,1259 @@
+
+/* Copyright (c) 2012, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/spinlock.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/dma-mapping.h>
+#include <linux/slab.h>
+#include <linux/iopoll.h>
+
+#include "mdss.h"
+#include "mdss_dsi.h"
+
+static struct completion dsi_dma_comp;
+static int dsi_irq_enabled;
+static spinlock_t dsi_irq_lock;
+static spinlock_t dsi_mdp_lock;
+static int dsi_mdp_busy;
+
+spinlock_t dsi_clk_lock;
+
+struct mdss_hw mdss_dsi_hw = {
+	.hw_ndx = MDSS_HW_DSI0,
+	.irq_handler = mdss_dsi_isr,
+};
+
+void mdss_dsi_init(void)
+{
+	init_completion(&dsi_dma_comp);
+	spin_lock_init(&dsi_irq_lock);
+	spin_lock_init(&dsi_mdp_lock);
+	spin_lock_init(&dsi_clk_lock);
+}
+
+void mdss_dsi_enable_irq(void)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&dsi_irq_lock, flags);
+	if (dsi_irq_enabled) {
+		pr_debug("%s: IRQ aleady enabled\n", __func__);
+		spin_unlock_irqrestore(&dsi_irq_lock, flags);
+		return;
+	}
+	mdss_enable_irq(&mdss_dsi_hw);
+	dsi_irq_enabled = 1;
+	/* TO DO: Check whether MDSS IRQ is enabled */
+	spin_unlock_irqrestore(&dsi_irq_lock, flags);
+}
+
+void mdss_dsi_disable_irq(void)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&dsi_irq_lock, flags);
+	if (dsi_irq_enabled == 0) {
+		pr_debug("%s: IRQ already disabled\n", __func__);
+		spin_unlock_irqrestore(&dsi_irq_lock, flags);
+		return;
+	}
+	mdss_disable_irq(&mdss_dsi_hw);
+	dsi_irq_enabled = 0;
+	/* TO DO: Check whether MDSS IRQ is Disabled */
+	spin_unlock_irqrestore(&dsi_irq_lock, flags);
+}
+
+/*
+ * mdss_dsi_disable_irq_nosync() should be called
+ * from interrupt context
+ */
+void mdss_dsi_disable_irq_nosync(void)
+{
+	spin_lock(&dsi_irq_lock);
+	if (dsi_irq_enabled == 0) {
+		pr_debug("%s: IRQ cannot be disabled\n", __func__);
+		spin_unlock(&dsi_irq_lock);
+		return;
+	}
+
+	dsi_irq_enabled = 0;
+	spin_unlock(&dsi_irq_lock);
+}
+
+/*
+ * mipi dsi buf mechanism
+ */
+char *mdss_dsi_buf_reserve(struct dsi_buf *dp, int len)
+{
+	dp->data += len;
+	return dp->data;
+}
+
+char *mdss_dsi_buf_unreserve(struct dsi_buf *dp, int len)
+{
+	dp->data -= len;
+	return dp->data;
+}
+
+char *mdss_dsi_buf_push(struct dsi_buf *dp, int len)
+{
+	dp->data -= len;
+	dp->len += len;
+	return dp->data;
+}
+
+char *mdss_dsi_buf_reserve_hdr(struct dsi_buf *dp, int hlen)
+{
+	dp->hdr = (u32 *)dp->data;
+	return mdss_dsi_buf_reserve(dp, hlen);
+}
+
+char *mdss_dsi_buf_init(struct dsi_buf *dp)
+{
+	int off;
+
+	dp->data = dp->start;
+	off = (int)dp->data;
+	/* 8 byte align */
+	off &= 0x07;
+	if (off)
+		off = 8 - off;
+	dp->data += off;
+	dp->len = 0;
+	return dp->data;
+}
+
+int mdss_dsi_buf_alloc(struct dsi_buf *dp, int size)
+{
+
+	dp->start = kmalloc(size, GFP_KERNEL);
+	if (dp->start == NULL) {
+		pr_err("%s:%u\n", __func__, __LINE__);
+		return -ENOMEM;
+	}
+
+	dp->end = dp->start + size;
+	dp->size = size;
+
+	if ((int)dp->start & 0x07)
+		pr_err("%s: buf NOT 8 bytes aligned\n", __func__);
+
+	dp->data = dp->start;
+	dp->len = 0;
+	return size;
+}
+
+/*
+ * mipi dsi generic long write
+ */
+static int mdss_dsi_generic_lwrite(struct dsi_buf *dp, struct dsi_cmd_desc *cm)
+{
+	char *bp;
+	u32 *hp;
+	int i, len;
+
+	bp = mdss_dsi_buf_reserve_hdr(dp, DSI_HOST_HDR_SIZE);
+
+	/* fill up payload */
+	if (cm->payload) {
+		len = cm->dlen;
+		len += 3;
+		len &= ~0x03;	/* multipled by 4 */
+		for (i = 0; i < cm->dlen; i++)
+			*bp++ = cm->payload[i];
+
+		/* append 0xff to the end */
+		for (; i < len; i++)
+			*bp++ = 0xff;
+
+		dp->len += len;
+	}
+
+	/* fill up header */
+	hp = dp->hdr;
+	*hp = 0;
+	*hp = DSI_HDR_WC(cm->dlen);
+	*hp |= DSI_HDR_VC(cm->vc);
+	*hp |= DSI_HDR_LONG_PKT;
+	*hp |= DSI_HDR_DTYPE(DTYPE_GEN_LWRITE);
+	if (cm->last)
+		*hp |= DSI_HDR_LAST;
+
+	mdss_dsi_buf_push(dp, DSI_HOST_HDR_SIZE);
+
+	return dp->len;
+}
+
+/*
+ * mipi dsi generic short write with 0, 1 or 2 parameters
+ */
+static int mdss_dsi_generic_swrite(struct dsi_buf *dp, struct dsi_cmd_desc *cm)
+{
+	u32 *hp;
+	int len;
+
+	if (cm->dlen && cm->payload == 0) {
+		pr_err("%s: NO payload error\n", __func__);
+		return 0;
+	}
+
+	mdss_dsi_buf_reserve_hdr(dp, DSI_HOST_HDR_SIZE);
+	hp = dp->hdr;
+	*hp = 0;
+	*hp |= DSI_HDR_VC(cm->vc);
+	if (cm->last)
+		*hp |= DSI_HDR_LAST;
+
+
+	len = (cm->dlen > 2) ? 2 : cm->dlen;
+
+	if (len == 1) {
+		*hp |= DSI_HDR_DTYPE(DTYPE_GEN_WRITE1);
+		*hp |= DSI_HDR_DATA1(cm->payload[0]);
+		*hp |= DSI_HDR_DATA2(0);
+	} else if (len == 2) {
+		*hp |= DSI_HDR_DTYPE(DTYPE_GEN_WRITE2);
+		*hp |= DSI_HDR_DATA1(cm->payload[0]);
+		*hp |= DSI_HDR_DATA2(cm->payload[1]);
+	} else {
+		*hp |= DSI_HDR_DTYPE(DTYPE_GEN_WRITE);
+		*hp |= DSI_HDR_DATA1(0);
+		*hp |= DSI_HDR_DATA2(0);
+	}
+
+	mdss_dsi_buf_push(dp, DSI_HOST_HDR_SIZE);
+
+	return dp->len;	/* 4 bytes */
+}
+
+/*
+ * mipi dsi generic read with 0, 1 or 2 parameters
+ */
+static int mdss_dsi_generic_read(struct dsi_buf *dp, struct dsi_cmd_desc *cm)
+{
+	u32 *hp;
+	int len;
+
+	if (cm->dlen && cm->payload == 0) {
+		pr_err("%s: NO payload error\n", __func__);
+		return 0;
+	}
+
+	mdss_dsi_buf_reserve_hdr(dp, DSI_HOST_HDR_SIZE);
+	hp = dp->hdr;
+	*hp = 0;
+	*hp |= DSI_HDR_VC(cm->vc);
+	*hp |= DSI_HDR_BTA;
+	if (cm->last)
+		*hp |= DSI_HDR_LAST;
+
+	len = (cm->dlen > 2) ? 2 : cm->dlen;
+
+	if (len == 1) {
+		*hp |= DSI_HDR_DTYPE(DTYPE_GEN_READ1);
+		*hp |= DSI_HDR_DATA1(cm->payload[0]);
+		*hp |= DSI_HDR_DATA2(0);
+	} else if (len == 2) {
+		*hp |= DSI_HDR_DTYPE(DTYPE_GEN_READ2);
+		*hp |= DSI_HDR_DATA1(cm->payload[0]);
+		*hp |= DSI_HDR_DATA2(cm->payload[1]);
+	} else {
+		*hp |= DSI_HDR_DTYPE(DTYPE_GEN_READ);
+		*hp |= DSI_HDR_DATA1(0);
+		*hp |= DSI_HDR_DATA2(0);
+	}
+
+	mdss_dsi_buf_push(dp, DSI_HOST_HDR_SIZE);
+	return dp->len;	/* 4 bytes */
+}
+
+/*
+ * mipi dsi dcs long write
+ */
+static int mdss_dsi_dcs_lwrite(struct dsi_buf *dp, struct dsi_cmd_desc *cm)
+{
+	char *bp;
+	u32 *hp;
+	int i, len;
+
+	bp = mdss_dsi_buf_reserve_hdr(dp, DSI_HOST_HDR_SIZE);
+
+	/*
+	 * fill up payload
+	 * dcs command byte (first byte) followed by payload
+	 */
+	if (cm->payload) {
+		len = cm->dlen;
+		len += 3;
+		len &= ~0x03;	/* multipled by 4 */
+		for (i = 0; i < cm->dlen; i++)
+			*bp++ = cm->payload[i];
+
+		/* append 0xff to the end */
+		for (; i < len; i++)
+			*bp++ = 0xff;
+
+		dp->len += len;
+	}
+
+	/* fill up header */
+	hp = dp->hdr;
+	*hp = 0;
+	*hp = DSI_HDR_WC(cm->dlen);
+	*hp |= DSI_HDR_VC(cm->vc);
+	*hp |= DSI_HDR_LONG_PKT;
+	*hp |= DSI_HDR_DTYPE(DTYPE_DCS_LWRITE);
+	if (cm->last)
+		*hp |= DSI_HDR_LAST;
+
+	mdss_dsi_buf_push(dp, DSI_HOST_HDR_SIZE);
+
+	return dp->len;
+}
+
+/*
+ * mipi dsi dcs short write with 0 parameters
+ */
+static int mdss_dsi_dcs_swrite(struct dsi_buf *dp, struct dsi_cmd_desc *cm)
+{
+	u32 *hp;
+	int len;
+
+	if (cm->payload == 0) {
+		pr_err("%s: NO payload error\n", __func__);
+		return -EINVAL;
+	}
+
+	mdss_dsi_buf_reserve_hdr(dp, DSI_HOST_HDR_SIZE);
+	hp = dp->hdr;
+	*hp = 0;
+	*hp |= DSI_HDR_VC(cm->vc);
+	if (cm->ack)		/* ask ACK trigger msg from peripheral */
+		*hp |= DSI_HDR_BTA;
+	if (cm->last)
+		*hp |= DSI_HDR_LAST;
+
+	len = (cm->dlen > 1) ? 1 : cm->dlen;
+
+	*hp |= DSI_HDR_DTYPE(DTYPE_DCS_WRITE);
+	*hp |= DSI_HDR_DATA1(cm->payload[0]);	/* dcs command byte */
+	*hp |= DSI_HDR_DATA2(0);
+
+	mdss_dsi_buf_push(dp, DSI_HOST_HDR_SIZE);
+	return dp->len;
+}
+
+/*
+ * mipi dsi dcs short write with 1 parameter
+ */
+static int mdss_dsi_dcs_swrite1(struct dsi_buf *dp, struct dsi_cmd_desc *cm)
+{
+	u32 *hp;
+
+	if (cm->dlen < 2 || cm->payload == 0) {
+		pr_err("%s: NO payload error\n", __func__);
+		return -EINVAL;
+	}
+
+	mdss_dsi_buf_reserve_hdr(dp, DSI_HOST_HDR_SIZE);
+	hp = dp->hdr;
+	*hp = 0;
+	*hp |= DSI_HDR_VC(cm->vc);
+	if (cm->ack)		/* ask ACK trigger msg from peripheral */
+		*hp |= DSI_HDR_BTA;
+	if (cm->last)
+		*hp |= DSI_HDR_LAST;
+
+	*hp |= DSI_HDR_DTYPE(DTYPE_DCS_WRITE1);
+	*hp |= DSI_HDR_DATA1(cm->payload[0]);	/* dcs command byte */
+	*hp |= DSI_HDR_DATA2(cm->payload[1]);	/* parameter */
+
+	mdss_dsi_buf_push(dp, DSI_HOST_HDR_SIZE);
+
+	return dp->len;
+}
+
+/*
+ * mipi dsi dcs read with 0 parameters
+ */
+static int mdss_dsi_dcs_read(struct dsi_buf *dp, struct dsi_cmd_desc *cm)
+{
+	u32 *hp;
+
+	if (cm->payload == 0) {
+		pr_err("%s: NO payload error\n", __func__);
+		return -EINVAL;
+	}
+
+	mdss_dsi_buf_reserve_hdr(dp, DSI_HOST_HDR_SIZE);
+	hp = dp->hdr;
+	*hp = 0;
+	*hp |= DSI_HDR_VC(cm->vc);
+	*hp |= DSI_HDR_BTA;
+	*hp |= DSI_HDR_DTYPE(DTYPE_DCS_READ);
+	if (cm->last)
+		*hp |= DSI_HDR_LAST;
+
+	*hp |= DSI_HDR_DATA1(cm->payload[0]);	/* dcs command byte */
+	*hp |= DSI_HDR_DATA2(0);
+
+	mdss_dsi_buf_push(dp, DSI_HOST_HDR_SIZE);
+
+	return dp->len;	/* 4 bytes */
+}
+
+static int mdss_dsi_cm_on(struct dsi_buf *dp, struct dsi_cmd_desc *cm)
+{
+	u32 *hp;
+
+	mdss_dsi_buf_reserve_hdr(dp, DSI_HOST_HDR_SIZE);
+	hp = dp->hdr;
+	*hp = 0;
+	*hp |= DSI_HDR_VC(cm->vc);
+	*hp |= DSI_HDR_DTYPE(DTYPE_CM_ON);
+	if (cm->last)
+		*hp |= DSI_HDR_LAST;
+
+	mdss_dsi_buf_push(dp, DSI_HOST_HDR_SIZE);
+
+	return dp->len;	/* 4 bytes */
+}
+
+static int mdss_dsi_cm_off(struct dsi_buf *dp, struct dsi_cmd_desc *cm)
+{
+	u32 *hp;
+
+	mdss_dsi_buf_reserve_hdr(dp, DSI_HOST_HDR_SIZE);
+	hp = dp->hdr;
+	*hp = 0;
+	*hp |= DSI_HDR_VC(cm->vc);
+	*hp |= DSI_HDR_DTYPE(DTYPE_CM_OFF);
+	if (cm->last)
+		*hp |= DSI_HDR_LAST;
+
+	mdss_dsi_buf_push(dp, DSI_HOST_HDR_SIZE);
+
+	return dp->len;	/* 4 bytes */
+}
+
+static int mdss_dsi_peripheral_on(struct dsi_buf *dp, struct dsi_cmd_desc *cm)
+{
+	u32 *hp;
+
+	mdss_dsi_buf_reserve_hdr(dp, DSI_HOST_HDR_SIZE);
+	hp = dp->hdr;
+	*hp = 0;
+	*hp |= DSI_HDR_VC(cm->vc);
+	*hp |= DSI_HDR_DTYPE(DTYPE_PERIPHERAL_ON);
+	if (cm->last)
+		*hp |= DSI_HDR_LAST;
+
+	mdss_dsi_buf_push(dp, DSI_HOST_HDR_SIZE);
+
+	return dp->len;	/* 4 bytes */
+}
+
+static int mdss_dsi_peripheral_off(struct dsi_buf *dp, struct dsi_cmd_desc *cm)
+{
+	u32 *hp;
+
+	mdss_dsi_buf_reserve_hdr(dp, DSI_HOST_HDR_SIZE);
+	hp = dp->hdr;
+	*hp = 0;
+	*hp |= DSI_HDR_VC(cm->vc);
+	*hp |= DSI_HDR_DTYPE(DTYPE_PERIPHERAL_OFF);
+	if (cm->last)
+		*hp |= DSI_HDR_LAST;
+
+	mdss_dsi_buf_push(dp, DSI_HOST_HDR_SIZE);
+
+	return dp->len;	/* 4 bytes */
+}
+
+static int mdss_dsi_set_max_pktsize(struct dsi_buf *dp, struct dsi_cmd_desc *cm)
+{
+	u32 *hp;
+
+	if (cm->payload == 0) {
+		pr_err("%s: NO payload error\n", __func__);
+		return 0;
+	}
+
+	mdss_dsi_buf_reserve_hdr(dp, DSI_HOST_HDR_SIZE);
+	hp = dp->hdr;
+	*hp = 0;
+	*hp |= DSI_HDR_VC(cm->vc);
+	*hp |= DSI_HDR_DTYPE(DTYPE_MAX_PKTSIZE);
+	if (cm->last)
+		*hp |= DSI_HDR_LAST;
+
+	*hp |= DSI_HDR_DATA1(cm->payload[0]);
+	*hp |= DSI_HDR_DATA2(cm->payload[1]);
+
+	mdss_dsi_buf_push(dp, DSI_HOST_HDR_SIZE);
+
+	return dp->len;	/* 4 bytes */
+}
+
+static int mdss_dsi_null_pkt(struct dsi_buf *dp, struct dsi_cmd_desc *cm)
+{
+	u32 *hp;
+
+	mdss_dsi_buf_reserve_hdr(dp, DSI_HOST_HDR_SIZE);
+	hp = dp->hdr;
+	*hp = 0;
+	*hp = DSI_HDR_WC(cm->dlen);
+	*hp |= DSI_HDR_LONG_PKT;
+	*hp |= DSI_HDR_VC(cm->vc);
+	*hp |= DSI_HDR_DTYPE(DTYPE_NULL_PKT);
+	if (cm->last)
+		*hp |= DSI_HDR_LAST;
+
+	mdss_dsi_buf_push(dp, DSI_HOST_HDR_SIZE);
+
+	return dp->len;	/* 4 bytes */
+}
+
+static int mdss_dsi_blank_pkt(struct dsi_buf *dp, struct dsi_cmd_desc *cm)
+{
+	u32 *hp;
+
+	mdss_dsi_buf_reserve_hdr(dp, DSI_HOST_HDR_SIZE);
+	hp = dp->hdr;
+	*hp = 0;
+	*hp = DSI_HDR_WC(cm->dlen);
+	*hp |= DSI_HDR_LONG_PKT;
+	*hp |= DSI_HDR_VC(cm->vc);
+	*hp |= DSI_HDR_DTYPE(DTYPE_BLANK_PKT);
+	if (cm->last)
+		*hp |= DSI_HDR_LAST;
+
+	mdss_dsi_buf_push(dp, DSI_HOST_HDR_SIZE);
+
+	return dp->len;	/* 4 bytes */
+}
+
+/*
+ * prepare cmd buffer to be txed
+ */
+int mdss_dsi_cmd_dma_add(struct dsi_buf *dp, struct dsi_cmd_desc *cm)
+{
+	int len = 0;
+
+	switch (cm->dtype) {
+	case DTYPE_GEN_WRITE:
+	case DTYPE_GEN_WRITE1:
+	case DTYPE_GEN_WRITE2:
+		len = mdss_dsi_generic_swrite(dp, cm);
+		break;
+	case DTYPE_GEN_LWRITE:
+		len = mdss_dsi_generic_lwrite(dp, cm);
+		break;
+	case DTYPE_GEN_READ:
+	case DTYPE_GEN_READ1:
+	case DTYPE_GEN_READ2:
+		len = mdss_dsi_generic_read(dp, cm);
+		break;
+	case DTYPE_DCS_LWRITE:
+		len = mdss_dsi_dcs_lwrite(dp, cm);
+		break;
+	case DTYPE_DCS_WRITE:
+		len = mdss_dsi_dcs_swrite(dp, cm);
+		break;
+	case DTYPE_DCS_WRITE1:
+		len = mdss_dsi_dcs_swrite1(dp, cm);
+		break;
+	case DTYPE_DCS_READ:
+		len = mdss_dsi_dcs_read(dp, cm);
+		break;
+	case DTYPE_MAX_PKTSIZE:
+		len = mdss_dsi_set_max_pktsize(dp, cm);
+		break;
+	case DTYPE_NULL_PKT:
+		len = mdss_dsi_null_pkt(dp, cm);
+		break;
+	case DTYPE_BLANK_PKT:
+		len = mdss_dsi_blank_pkt(dp, cm);
+		break;
+	case DTYPE_CM_ON:
+		len = mdss_dsi_cm_on(dp, cm);
+		break;
+	case DTYPE_CM_OFF:
+		len = mdss_dsi_cm_off(dp, cm);
+		break;
+	case DTYPE_PERIPHERAL_ON:
+		len = mdss_dsi_peripheral_on(dp, cm);
+		break;
+	case DTYPE_PERIPHERAL_OFF:
+		len = mdss_dsi_peripheral_off(dp, cm);
+		break;
+	default:
+		pr_debug("%s: dtype=%x NOT supported\n",
+					__func__, cm->dtype);
+		break;
+
+	}
+
+	return len;
+}
+
+/*
+ * mdss_dsi_short_read1_resp: 1 parameter
+ */
+static int mdss_dsi_short_read1_resp(struct dsi_buf *rp)
+{
+	/* strip out dcs type */
+	rp->data++;
+	rp->len = 1;
+	return rp->len;
+}
+
+/*
+ * mdss_dsi_short_read2_resp: 2 parameter
+ */
+static int mdss_dsi_short_read2_resp(struct dsi_buf *rp)
+{
+	/* strip out dcs type */
+	rp->data++;
+	rp->len = 2;
+	return rp->len;
+}
+
+static int mdss_dsi_long_read_resp(struct dsi_buf *rp)
+{
+	short len;
+
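+	/* word count: bytes 1-2 of the long read response header, LSB first */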
+	len = rp->data[2];
+	len <<= 8;
+	len |= rp->data[1];
+	/* strip out dcs header */
+	rp->data += 4;
+	rp->len -= 4;
+	/* strip out 2 bytes of checksum */
+	rp->len -= 2;
+	return len;
+}
+
+void mdss_dsi_host_init(struct mipi_panel_info *pinfo,
+				struct mdss_panel_data *pdata)
+{
+	u32 dsi_ctrl, intr_ctrl;
+	u32 data;
+
+	pinfo->rgb_swap = DSI_RGB_SWAP_RGB;
+
+	if (pinfo->mode == DSI_VIDEO_MODE) {
+		data = 0;
+		if (pinfo->pulse_mode_hsa_he)
+			data |= BIT(28);
+		if (pinfo->hfp_power_stop)
+			data |= BIT(24);
+		if (pinfo->hbp_power_stop)
+			data |= BIT(20);
+		if (pinfo->hsa_power_stop)
+			data |= BIT(16);
+		if (pinfo->eof_bllp_power_stop)
+			data |= BIT(15);
+		if (pinfo->bllp_power_stop)
+			data |= BIT(12);
+		data |= ((pinfo->traffic_mode & 0x03) << 8);
+		data |= ((pinfo->dst_format & 0x03) << 4); /* 2 bits */
+		data |= (pinfo->vc & 0x03);
+		MIPI_OUTP((pdata->dsi_base) + 0x0010, data);
+
+		data = 0;
+		data |= ((pinfo->rgb_swap & 0x07) << 12);
+		if (pinfo->b_sel)
+			data |= BIT(8);
+		if (pinfo->g_sel)
+			data |= BIT(4);
+		if (pinfo->r_sel)
+			data |= BIT(0);
+		MIPI_OUTP((pdata->dsi_base) + 0x0020, data);
+	} else if (pinfo->mode == DSI_CMD_MODE) {
+		data = 0;
+		data |= ((pinfo->interleave_max & 0x0f) << 20);
+		data |= ((pinfo->rgb_swap & 0x07) << 16);
+		if (pinfo->b_sel)
+			data |= BIT(12);
+		if (pinfo->g_sel)
+			data |= BIT(8);
+		if (pinfo->r_sel)
+			data |= BIT(4);
+		data |= (pinfo->dst_format & 0x0f);	/* 4 bits */
+		MIPI_OUTP((pdata->dsi_base) + 0x003c, data);
+
+		/* DSI_COMMAND_MODE_MDP_DCS_CMD_CTRL */
+		data = pinfo->wr_mem_continue & 0x0ff;
+		data <<= 8;
+		data |= (pinfo->wr_mem_start & 0x0ff);
+		if (pinfo->insert_dcs_cmd)
+			data |= BIT(16);
+		MIPI_OUTP((pdata->dsi_base) + 0x0044, data);
+	} else
+		pr_err("%s: Unknown DSI mode=%d\n", __func__, pinfo->mode);
+
+	dsi_ctrl = BIT(8) | BIT(2);	/* clock enable & cmd mode */
+	intr_ctrl = 0;
+	intr_ctrl = (DSI_INTR_CMD_DMA_DONE_MASK | DSI_INTR_CMD_MDP_DONE_MASK);
+
+	if (pinfo->crc_check)
+		dsi_ctrl |= BIT(24);
+	if (pinfo->ecc_check)
+		dsi_ctrl |= BIT(20);
+	if (pinfo->data_lane3)
+		dsi_ctrl |= BIT(7);
+	if (pinfo->data_lane2)
+		dsi_ctrl |= BIT(6);
+	if (pinfo->data_lane1)
+		dsi_ctrl |= BIT(5);
+	if (pinfo->data_lane0)
+		dsi_ctrl |= BIT(4);
+
+	/* from frame buffer, low power mode */
+	/* DSI_COMMAND_MODE_DMA_CTRL */
+	MIPI_OUTP((pdata->dsi_base) + 0x3C, 0x14000000);
+
+	data = 0;
+	if (pinfo->te_sel)
+		data |= BIT(31);
+	data |= pinfo->mdp_trigger << 4;/* cmd mdp trigger */
+	data |= pinfo->dma_trigger;	/* cmd dma trigger */
+	data |= (pinfo->stream & 0x01) << 8;
+	MIPI_OUTP((pdata->dsi_base) + 0x0084, data); /* DSI_TRIG_CTRL */
+
+	/* DSI_LAN_SWAP_CTRL */
+	MIPI_OUTP((pdata->dsi_base) + 0x00b0, pinfo->dlane_swap);
+
+	/* clock out ctrl */
+	data = pinfo->t_clk_post & 0x3f;	/* 6 bits */
+	data <<= 8;
+	data |= pinfo->t_clk_pre & 0x3f;	/*  6 bits */
+	/* DSI_CLKOUT_TIMING_CTRL */
+	MIPI_OUTP((pdata->dsi_base) + 0xc4, data);
+
+	data = 0;
+	if (pinfo->rx_eot_ignore)
+		data |= BIT(4);
+	if (pinfo->tx_eot_append)
+		data |= BIT(0);
+	MIPI_OUTP((pdata->dsi_base) + 0x00cc, data); /* DSI_EOT_PACKET_CTRL */
+
+
+	/* allow only ack-err-status  to generate interrupt */
+	/* DSI_ERR_INT_MASK0 */
+	MIPI_OUTP((pdata->dsi_base) + 0x010c, 0x13ff3fe0);
+
+	intr_ctrl |= DSI_INTR_ERROR_MASK;
+	MIPI_OUTP((pdata->dsi_base) + 0x0110, intr_ctrl); /* DSI_INTR_CTRL */
+
+	/* turn esc, byte, dsi, pclk, sclk, hclk on */
+	MIPI_OUTP((pdata->dsi_base) + 0x11c, 0x23f); /* DSI_CLK_CTRL */
+
+	dsi_ctrl |= BIT(0);	/* enable dsi */
+	MIPI_OUTP((pdata->dsi_base) + 0x0004, dsi_ctrl);
+
+	wmb();
+}
+
+void mipi_set_tx_power_mode(int mode, struct mdss_panel_data *pdata)
+{
+	u32 data = MIPI_INP((pdata->dsi_base) + 0x3c);
+
+	if (mode == 0)
+		data &= ~BIT(26);
+	else
+		data |= BIT(26);
+
+	MIPI_OUTP((pdata->dsi_base) + 0x3c, data);
+}
+
+void mdss_dsi_sw_reset(struct mdss_panel_data *pdata)
+{
+	MIPI_OUTP((pdata->dsi_base) + 0x118, 0x01);
+	wmb();
+	MIPI_OUTP((pdata->dsi_base) + 0x118, 0x00);
+	wmb();
+}
+
+void mdss_dsi_controller_cfg(int enable,
+			     struct mdss_panel_data *pdata)
+{
+
+	u32 dsi_ctrl;
+	u32 status;
+	u32 sleep_us = 1000;
+	u32 timeout_us = 16000;
+
+	/* Check for CMD_MODE_DMA_BUSY */
+	if (readl_poll_timeout(((pdata->dsi_base) + 0x0008),
+			   status,
+			   ((status & 0x02) == 0),
+			       sleep_us, timeout_us))
+		pr_info("%s: DSI status=%x failed\n", __func__, status);
+
+	/* Check for x_HS_FIFO_EMPTY */
+	if (readl_poll_timeout(((pdata->dsi_base) + 0x000c),
+			   status,
+			   ((status & 0x11111000) == 0x11111000),
+			       sleep_us, timeout_us))
+		pr_info("%s: FIFO status=%x failed\n", __func__, status);
+
+	dsi_ctrl = MIPI_INP((pdata->dsi_base) + 0x0004);
+	if (enable)
+		dsi_ctrl |= 0x01;
+	else
+		dsi_ctrl &= ~0x01;
+
+	MIPI_OUTP((pdata->dsi_base) + 0x0004, dsi_ctrl);
+	wmb();
+}
+
+void mdss_dsi_op_mode_config(int mode,
+			     struct mdss_panel_data *pdata)
+{
+
+	u32 dsi_ctrl, intr_ctrl;
+
+	dsi_ctrl = MIPI_INP((pdata->dsi_base) + 0x0004);
+	dsi_ctrl &= ~0x07;
+	if (mode == DSI_VIDEO_MODE) {
+		dsi_ctrl |= 0x03;
+		intr_ctrl = DSI_INTR_CMD_DMA_DONE_MASK;
+	} else {		/* command mode */
+		dsi_ctrl |= 0x05;
+		intr_ctrl = DSI_INTR_CMD_DMA_DONE_MASK | DSI_INTR_ERROR_MASK |
+				DSI_INTR_CMD_MDP_DONE_MASK;
+	}
+
+	pr_debug("%s: dsi_ctrl=%x intr=%x\n", __func__, dsi_ctrl, intr_ctrl);
+
+	MIPI_OUTP((pdata->dsi_base) + 0x0110, intr_ctrl); /* DSI_INTR_CTRL */
+	MIPI_OUTP((pdata->dsi_base) + 0x0004, dsi_ctrl);
+	wmb();
+}
+
+void mdss_dsi_cmd_mdp_start(void)
+{
+	unsigned long flag;
+
+	spin_lock_irqsave(&dsi_mdp_lock, flag);
+	mdss_dsi_enable_irq();
+	dsi_mdp_busy = true;
+	spin_unlock_irqrestore(&dsi_mdp_lock, flag);
+}
+
+
+void mdss_dsi_cmd_bta_sw_trigger(struct mdss_panel_data *pdata)
+{
+	u32 status;
+	int timeout_us = 10000;
+
+	MIPI_OUTP((pdata->dsi_base) + 0x098, 0x01);	/* trigger */
+	wmb();
+
+	/* Check for CMD_MODE_DMA_BUSY */
+	if (readl_poll_timeout(((pdata->dsi_base) + 0x0008),
+				status, ((status & 0x0010) == 0),
+				0, timeout_us))
+		pr_info("%s: DSI status=%x failed\n", __func__, status);
+
+	mdss_dsi_ack_err_status((pdata->dsi_base));
+
+	pr_debug("%s: BTA done, status = %d\n", __func__, status);
+}
+
+int mdss_dsi_cmd_reg_tx(u32 data,
+			struct mdss_panel_data *pdata)
+{
+	int i;
+	char *bp;
+
+	bp = (char *)&data;
+	pr_debug("%s: ", __func__);
+	for (i = 0; i < 4; i++)
+		pr_debug("%x ", *bp++);
+
+	pr_debug("\n");
+
+	MIPI_OUTP((pdata->dsi_base) + 0x0084, 0x04);/* sw trigger */
+	MIPI_OUTP((pdata->dsi_base) + 0x0004, 0x135);
+
+	wmb();
+
+	MIPI_OUTP((pdata->dsi_base) + 0x03c, data);
+	wmb();
+	MIPI_OUTP((pdata->dsi_base) + 0x090, 0x01);	/* trigger */
+	wmb();
+
+	udelay(300);
+
+	return 4;
+}
+
+/*
+ * mdss_dsi_cmds_tx:
+ * ov_mutex needs to be acquired before calling this function.
+ */
+int mdss_dsi_cmds_tx(struct mdss_panel_data *pdata,
+		struct dsi_buf *tp, struct dsi_cmd_desc *cmds, int cnt)
+{
+	struct dsi_cmd_desc *cm;
+	u32 dsi_ctrl, ctrl;
+	int i, video_mode;
+	unsigned long flag;
+
+	/*
+	 * turn on cmd mode
+	 * for video mode, do not send more cmds than fit in
+	 * one pixel line, since they are only transmitted
+	 * during the BLLP.
+	 */
+	dsi_ctrl = MIPI_INP((pdata->dsi_base) + 0x0004);
+	video_mode = dsi_ctrl & 0x02; /* VIDEO_MODE_EN */
+	if (video_mode) {
+		ctrl = dsi_ctrl | 0x04; /* CMD_MODE_EN */
+		MIPI_OUTP((pdata->dsi_base) + 0x0004, ctrl);
+	}
+
+	spin_lock_irqsave(&dsi_mdp_lock, flag);
+	mdss_dsi_enable_irq();
+	dsi_mdp_busy = true;
+	spin_unlock_irqrestore(&dsi_mdp_lock, flag);
+
+	cm = cmds;
+	mdss_dsi_buf_init(tp);
+	for (i = 0; i < cnt; i++) {
+		mdss_dsi_buf_init(tp);
+		mdss_dsi_cmd_dma_add(tp, cm);
+		mdss_dsi_cmd_dma_tx(tp, pdata);
+		if (cm->wait)
+			msleep(cm->wait);
+		cm++;
+	}
+
+	spin_lock_irqsave(&dsi_mdp_lock, flag);
+	dsi_mdp_busy = false;
+	mdss_dsi_disable_irq();
+	spin_unlock_irqrestore(&dsi_mdp_lock, flag);
+
+	if (video_mode)
+		MIPI_OUTP((pdata->dsi_base) + 0x0004, dsi_ctrl); /* restore */
+
+	return cnt;
+}
+
+/* MDSS_DSI_MRPS, Maximum Return Packet Size */
+static char max_pktsize[2] = {0x00, 0x00}; /* LSB tx first, set at run time in mdss_dsi_cmds_rx() */
+
+static struct dsi_cmd_desc pkt_size_cmd[] = {
+	{DTYPE_MAX_PKTSIZE, 1, 0, 0, 0,
+		sizeof(max_pktsize), max_pktsize}
+};
+
+/*
+ * The DSI panel replies with up to MAX_RETURN_PACKET_SIZE bytes of data
+ * plus DCS header, ECC and CRC for a DCS long read response.
+ * The mdss_dsi controller only has 4 x 32-bit registers (16 bytes) to
+ * hold data per transaction.
+ * MDSS_DSI_LEN equals 8, so len should be either 4 or 8;
+ * any return data longer than MDSS_DSI_LEN needs to be broken down
+ * into multiple transactions.
+ *
+ * ov_mutex needs to be acquired before calling this function.
+ */
+int mdss_dsi_cmds_rx(struct mdss_panel_data *pdata,
+			struct dsi_buf *tp, struct dsi_buf *rp,
+			struct dsi_cmd_desc *cmds, int rlen)
+{
+	int cnt, len, diff, pkt_size;
+	unsigned long flag;
+	char cmd;
+
+	if (pdata->panel_info.mipi.no_max_pkt_size)
+		rlen = ALIGN(rlen, 4); /* Only support rlen = 4*n */
+
+	len = rlen;
+	diff = 0;
+
+	if (len <= 2)
+		cnt = 4;	/* short read */
+	else {
+		if (len > MDSS_DSI_LEN)
+			len = MDSS_DSI_LEN;	/* 8 bytes at most */
+
+		len = ALIGN(len, 4); /* len 4 bytes align */
+		diff = len - rlen;
+		/*
+		 * add extra 2 bytes to len so that the overall
+		 * packet size is a multiple of 4. This also makes
+		 * sure the 4 byte dcs header lands within a
+		 * 32-bit register after shift in.
+		 * After all this, len should be either 6 or 10.
+		 */
+		len += 2;
+		cnt = len + 6; /* 4 bytes header + 2 bytes crc */
+	}
+
+	spin_lock_irqsave(&dsi_mdp_lock, flag);
+	mdss_dsi_enable_irq();
+	dsi_mdp_busy = true;
+	spin_unlock_irqrestore(&dsi_mdp_lock, flag);
+
+	if (!pdata->panel_info.mipi.no_max_pkt_size) {
+		/* packet size need to be set at every read */
+		pkt_size = len;
+		max_pktsize[0] = pkt_size;
+		mdss_dsi_buf_init(tp);
+		mdss_dsi_cmd_dma_add(tp, pkt_size_cmd);
+		mdss_dsi_cmd_dma_tx(tp, pdata);
+	}
+
+	mdss_dsi_buf_init(tp);
+	mdss_dsi_cmd_dma_add(tp, cmds);
+
+	/* transmit read command to client */
+	mdss_dsi_cmd_dma_tx(tp, pdata);
+	/*
+	 * once the cmd_dma_done interrupt is received, the
+	 * return data from the client is ready and already
+	 * stored in the RDBK_DATA registers
+	 */
+	mdss_dsi_buf_init(rp);
+	if (pdata->panel_info.mipi.no_max_pkt_size) {
+		/*
+		 * expect rlen = n * 4
+		 * short alignment for start addr
+		 */
+		rp->data += 2;
+	}
+
+	mdss_dsi_cmd_dma_rx(rp, cnt, pdata);
+
+	spin_lock_irqsave(&dsi_mdp_lock, flag);
+	dsi_mdp_busy = false;
+	mdss_dsi_disable_irq();
+	spin_unlock_irqrestore(&dsi_mdp_lock, flag);
+
+	if (pdata->panel_info.mipi.no_max_pkt_size) {
+		/*
+		 * remove the extra 2 bytes from the previous
+		 * rx transaction that were inserted into the
+		 * shift register when copying the shift
+		 * registers to the rx buffer; the rx payload
+		 * starts at a long-aligned addr
+		 */
+		rp->data += 2;
+	}
+
+	cmd = rp->data[0];
+	switch (cmd) {
+	case DTYPE_ACK_ERR_RESP:
+		pr_debug("%s: rx ACK_ERR_PACLAGE\n", __func__);
+		break;
+	case DTYPE_GEN_READ1_RESP:
+	case DTYPE_DCS_READ1_RESP:
+		mdss_dsi_short_read1_resp(rp);
+		break;
+	case DTYPE_GEN_READ2_RESP:
+	case DTYPE_DCS_READ2_RESP:
+		mdss_dsi_short_read2_resp(rp);
+		break;
+	case DTYPE_GEN_LREAD_RESP:
+	case DTYPE_DCS_LREAD_RESP:
+		mdss_dsi_long_read_resp(rp);
+		rp->len -= 2; /* extra 2 bytes added */
+		rp->len -= diff; /* align bytes */
+		break;
+	default:
+		break;
+	}
+
+	return rp->len;
+}
+
+int mdss_dsi_cmd_dma_tx(struct dsi_buf *tp,
+			struct mdss_panel_data *pdata)
+{
+	int len;
+	int i;
+	char *bp;
+
+	bp = tp->data;
+
+	pr_debug("%s: ", __func__);
+	for (i = 0; i < tp->len; i++)
+		pr_debug("%x ", *bp++);
+
+	pr_debug("\n");
+
+	len = tp->len;
+	len += 3;
+	len &= ~0x03;	/* multipled by 4 */
+
+	tp->dmap = dma_map_single(&dsi_dev, tp->data, len, DMA_TO_DEVICE);
+	if (dma_mapping_error(&dsi_dev, tp->dmap))
+		pr_err("%s: dmap mapp failed\n", __func__);
+
+	INIT_COMPLETION(dsi_dma_comp);
+
+	MIPI_OUTP((pdata->dsi_base) + 0x048, tp->dmap);
+	MIPI_OUTP((pdata->dsi_base) + 0x04c, len);
+	wmb();
+
+	MIPI_OUTP((pdata->dsi_base) + 0x090, 0x01);	/* trigger */
+	wmb();
+
+	wait_for_completion(&dsi_dma_comp);
+
+	dma_unmap_single(&dsi_dev, tp->dmap, len, DMA_TO_DEVICE);
+	tp->dmap = 0;
+	return tp->len;
+}
+
+int mdss_dsi_cmd_dma_rx(struct dsi_buf *rp, int rlen,
+			struct mdss_panel_data *pdata)
+{
+	u32 *lp, data;
+	int i, off, cnt;
+
+	lp = (u32 *)rp->data;
+	cnt = rlen;
+	cnt += 3;
+	cnt >>= 2;
+
+	if (cnt > 4)
+		cnt = 4; /* 4 x 32 bits registers only */
+
+	off = 0x06c;	/* DSI_RDBK_DATA0 */
+	off += ((cnt - 1) * 4);
+
+
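+	/*
+	 * walk the RDBK_DATA registers from the highest used one down to
+	 * RDBK_DATA0, converting each 32-bit word to host byte order.
+	 */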
+	for (i = 0; i < cnt; i++) {
+		data = (u32)MIPI_INP((pdata->dsi_base) + off);
+		*lp++ = ntohl(data);	/* convert from network byte order to host order */
+		off -= 4;
+		rp->len += sizeof(*lp);
+	}
+
+	return rlen;
+}
+
+void mdss_dsi_ack_err_status(unsigned char *dsi_base)
+{
+	u32 status;
+
+	status = MIPI_INP(dsi_base + 0x0068);/* DSI_ACK_ERR_STATUS */
+
+	if (status) {
+		MIPI_OUTP(dsi_base + 0x0068, status);
+		pr_debug("%s: status=%x\n", __func__, status);
+	}
+}
+
+void mdss_dsi_timeout_status(unsigned char *dsi_base)
+{
+	u32 status;
+
+	status = MIPI_INP(dsi_base + 0x00c0);/* DSI_TIMEOUT_STATUS */
+	if (status & 0x0111) {
+		MIPI_OUTP(dsi_base + 0x00c0, status);
+		pr_debug("%s: status=%x\n", __func__, status);
+	}
+}
+
+void mdss_dsi_dln0_phy_err(unsigned char *dsi_base)
+{
+	u32 status;
+
+	status = MIPI_INP(dsi_base + 0x00b4);/* DSI_DLN0_PHY_ERR */
+
+	if (status & 0x011111) {
+		MIPI_OUTP(dsi_base + 0x00b4, status);
+		pr_debug("%s: status=%x\n", __func__, status);
+	}
+}
+
+void mdss_dsi_fifo_status(unsigned char *dsi_base)
+{
+	u32 status;
+
+	status = MIPI_INP(dsi_base + 0x000c);/* DSI_FIFO_STATUS */
+
+	if (status & 0x44444489) {
+		MIPI_OUTP(dsi_base + 0x000c, status);
+		pr_debug("%s: status=%x\n", __func__, status);
+	}
+}
+
+void mdss_dsi_status(unsigned char *dsi_base)
+{
+	u32 status;
+
+	status = MIPI_INP(dsi_base + 0x0008);/* DSI_STATUS */
+
+	if (status & 0x80000000) {
+		MIPI_OUTP(dsi_base + 0x0008, status);
+		pr_debug("%s: status=%x\n", __func__, status);
+	}
+}
+
+void mdss_dsi_error(unsigned char *dsi_base)
+{
+	/* DSI_ERR_INT_MASK0 */
+	mdss_dsi_ack_err_status(dsi_base);	/* mask0, 0x01f */
+	mdss_dsi_timeout_status(dsi_base);	/* mask0, 0x0e0 */
+	mdss_dsi_fifo_status(dsi_base);		/* mask0, 0x133d00 */
+	mdss_dsi_status(dsi_base);		/* mask0, 0xc0100 */
+	mdss_dsi_dln0_phy_err(dsi_base);	/* mask0, 0x3e00000 */
+}
+
+
+irqreturn_t mdss_dsi_isr(int irq, void *ptr)
+{
+	u32 isr;
+	unsigned char *dsi_base;
+
+	dsi_base = mdss_dsi_get_base_adr();
+	if (!dsi_base) {
+		pr_err("%s:%d DSI base address not initialized\n",
+				__func__, __LINE__);
+		return IRQ_HANDLED;
+	}
+
+	isr = MIPI_INP(dsi_base + 0x0110);/* DSI_INTR_CTRL */
+	MIPI_OUTP(dsi_base + 0x0110, isr);
+
+	if (isr & DSI_INTR_ERROR)
+		mdss_dsi_error(dsi_base);
+
+	if (isr & DSI_INTR_VIDEO_DONE) {
+		/*
+		 * video frame done: nothing to do here yet
+		 */
+	}
+
+	if (isr & DSI_INTR_CMD_DMA_DONE)
+		complete(&dsi_dma_comp);
+
+	if (isr & DSI_INTR_CMD_MDP_DONE) {
+		spin_lock(&dsi_mdp_lock);
+		dsi_mdp_busy = false;
+		mdss_dsi_disable_irq_nosync();
+		spin_unlock(&dsi_mdp_lock);
+	}
+
+	return IRQ_HANDLED;
+}
diff --git a/drivers/video/msm/mdss/mdss_dsi_panel.c b/drivers/video/msm/mdss/mdss_dsi_panel.c
new file mode 100644
index 0000000..bfb7fae
--- /dev/null
+++ b/drivers/video/msm/mdss/mdss_dsi_panel.c
@@ -0,0 +1,358 @@
+/* Copyright (c) 2012, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/of.h>
+#include <linux/slab.h>
+
+#include "mdss_dsi.h"
+
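+/*
+ * qcom,panel-on-cmds / qcom,panel-off-cmds are packed blobs: each entry
+ * is a 6 byte header (dtype, last, vc, ack, wait, dlen) followed by
+ * dlen payload bytes; DT_CMD_HDR is the size of that header.
+ */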
+#define DT_CMD_HDR 6
+
+static struct dsi_buf dsi_panel_tx_buf;
+static struct dsi_buf dsi_panel_rx_buf;
+
+static struct dsi_cmd_desc *dsi_panel_on_cmds;
+static struct dsi_cmd_desc *dsi_panel_off_cmds;
+static int num_of_on_cmds;
+static int num_of_off_cmds;
+static char *on_cmds, *off_cmds;
+
+static int mdss_dsi_panel_on(struct mdss_panel_data *pdata)
+{
+	struct mipi_panel_info *mipi;
+
+	mipi  = &pdata->panel_info.mipi;
+
+	pr_debug("%s:%d, debug info (mode) : %d\n", __func__, __LINE__,
+		 mipi->mode);
+
+	if (mipi->mode == DSI_VIDEO_MODE) {
+		mdss_dsi_cmds_tx(pdata, &dsi_panel_tx_buf, dsi_panel_on_cmds,
+			num_of_on_cmds);
+	} else {
+		pr_err("%s:%d, CMD MODE NOT SUPPORTED", __func__, __LINE__);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int mdss_dsi_panel_off(struct mdss_panel_data *pdata)
+{
+	struct mipi_panel_info *mipi;
+
+	mipi  = &pdata->panel_info.mipi;
+
+	pr_debug("%s:%d, debug info\n", __func__, __LINE__);
+
+	if (mipi->mode == DSI_VIDEO_MODE) {
+		mdss_dsi_cmds_tx(pdata, &dsi_panel_tx_buf, dsi_panel_off_cmds,
+			num_of_off_cmds);
+	} else {
+		pr_debug("%s:%d, CMD mode not supported", __func__, __LINE__);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int mdss_panel_parse_dt(struct platform_device *pdev,
+			    struct mdss_panel_common_pdata *panel_data)
+{
+	struct device_node *np = pdev->dev.of_node;
+	u32 res[6], tmp;
+	int rc, i, len;
+	int cmd_plen, data_offset;
+	const char *data;
+
+	rc = of_property_read_u32_array(np, "qcom,mdss-pan-res", res, 2);
+	if (rc) {
+		pr_err("%s:%d, panel resolution not specified\n",
+						__func__, __LINE__);
+		return -EINVAL;
+	}
+	panel_data->panel_info.xres = (!rc ? res[0] : 640);
+	panel_data->panel_info.yres = (!rc ? res[1] : 480);
+
+	rc = of_property_read_u32(np, "qcom,mdss-pan-bpp", &tmp);
+	if (rc) {
+		pr_err("%s:%d, panel bpp not specified\n",
+						__func__, __LINE__);
+		return -EINVAL;
+	}
+	panel_data->panel_info.bpp = (!rc ? tmp : 24);
+
+	rc = of_property_read_u32_array(np,
+		"qcom,mdss-pan-porch-values", res, 6);
+	panel_data->panel_info.lcdc.h_back_porch = (!rc ? res[0] : 6);
+	panel_data->panel_info.lcdc.h_pulse_width = (!rc ? res[1] : 2);
+	panel_data->panel_info.lcdc.h_front_porch = (!rc ? res[2] : 6);
+	panel_data->panel_info.lcdc.v_back_porch = (!rc ? res[3] : 6);
+	panel_data->panel_info.lcdc.v_pulse_width = (!rc ? res[4] : 2);
+	panel_data->panel_info.lcdc.v_front_porch = (!rc ? res[5] : 6);
+
+	rc = of_property_read_u32(np,
+		"qcom,mdss-pan-underflow-clr", &tmp);
+	panel_data->panel_info.lcdc.underflow_clr = (!rc ? tmp : 0xff);
+
+	rc = of_property_read_u32_array(np,
+		"qcom,mdss-pan-bl-levels", res, 2);
+	panel_data->panel_info.bl_min = (!rc ? res[0] : 0);
+	panel_data->panel_info.bl_max = (!rc ? res[1] : 255);
+
+	rc = of_property_read_u32(np, "qcom,mdss-pan-dsi-mode", &tmp);
+	panel_data->panel_info.mipi.mode = (!rc ? tmp : DSI_VIDEO_MODE);
+
+	rc = of_property_read_u32(np,
+		"qcom,mdss-pan-dsi-h-pulse-mode", &tmp);
+	panel_data->panel_info.mipi.pulse_mode_hsa_he = (!rc ? tmp : false);
+
+	rc = of_property_read_u32_array(np,
+		"qcom,mdss-pan-dsi-h-power-stop", res, 3);
+	panel_data->panel_info.mipi.hbp_power_stop = (!rc ? res[0] : false);
+	panel_data->panel_info.mipi.hsa_power_stop = (!rc ? res[1] : false);
+	panel_data->panel_info.mipi.hfp_power_stop = (!rc ? res[2] : false);
+
+	rc = of_property_read_u32_array(np,
+		"qcom,mdss-pan-dsi-bllp-power-stop", res, 2);
+	panel_data->panel_info.mipi.bllp_power_stop =
+					(!rc ? res[0] : false);
+	panel_data->panel_info.mipi.eof_bllp_power_stop =
+					(!rc ? res[1] : false);
+
+	rc = of_property_read_u32(np,
+		"qcom,mdss-pan-dsi-traffic-mode", &tmp);
+	panel_data->panel_info.mipi.traffic_mode =
+			(!rc ? tmp : DSI_NON_BURST_SYNCH_PULSE);
+
+	rc = of_property_read_u32(np,
+		"qcom,mdss-pan-dsi-dst-format", &tmp);
+	panel_data->panel_info.mipi.dst_format =
+			(!rc ? tmp : DSI_VIDEO_DST_FORMAT_RGB888);
+
+	rc = of_property_read_u32(np, "qcom,mdss-pan-dsi-vc", &tmp);
+	panel_data->panel_info.mipi.vc = (!rc ? tmp : 0);
+
+	rc = of_property_read_u32(np, "qcom,mdss-pan-dsi-rgb-swap", &tmp);
+	panel_data->panel_info.mipi.rgb_swap = (!rc ? tmp : DSI_RGB_SWAP_RGB);
+
+	rc = of_property_read_u32_array(np,
+		"qcom,mdss-pan-dsi-data-lanes", res, 4);
+	panel_data->panel_info.mipi.data_lane0 = (!rc ? res[0] : true);
+	panel_data->panel_info.mipi.data_lane1 = (!rc ? res[1] : false);
+	panel_data->panel_info.mipi.data_lane2 = (!rc ? res[2] : false);
+	panel_data->panel_info.mipi.data_lane3 = (!rc ? res[3] : false);
+
+	rc = of_property_read_u32_array(np, "qcom,mdss-pan-dsi-t-clk", res, 2);
+	panel_data->panel_info.mipi.t_clk_pre = (!rc ? res[0] : 0x24);
+	panel_data->panel_info.mipi.t_clk_post = (!rc ? res[1] : 0x03);
+
+	rc = of_property_read_u32(np, "qcom,mdss-pan-dsi-stream", &tmp);
+	panel_data->panel_info.mipi.stream = (!rc ? tmp : 0);
+
+	rc = of_property_read_u32(np, "qcom,mdss-pan-dsi-mdp-tr", &tmp);
+	panel_data->panel_info.mipi.mdp_trigger =
+			(!rc ? tmp : DSI_CMD_TRIGGER_SW);
+	if (panel_data->panel_info.mipi.mdp_trigger > 6) {
+		pr_err("%s:%d, Invalid mdp trigger. Forcing to sw trigger",
+						 __func__, __LINE__);
+		panel_data->panel_info.mipi.mdp_trigger =
+					DSI_CMD_TRIGGER_SW;
+	}
+
+	rc = of_property_read_u32(np, "qcom,mdss-pan-dsi-dma-tr", &tmp);
+	panel_data->panel_info.mipi.dma_trigger =
+			(!rc ? tmp : DSI_CMD_TRIGGER_SW);
+	if (panel_data->panel_info.mipi.dma_trigger > 6) {
+		pr_err("%s:%d, Invalid dma trigger. Forcing to sw trigger",
+						 __func__, __LINE__);
+		panel_data->panel_info.mipi.dma_trigger =
+					DSI_CMD_TRIGGER_SW;
+	}
+
+	rc = of_property_read_u32(np, "qcom,mdss-pan-dsi-frame-rate", &tmp);
+	panel_data->panel_info.mipi.frame_rate = (!rc ? tmp : 60);
+
+	data = of_get_property(np, "qcom,panel-on-cmds", &len);
+	if (!data) {
+		pr_err("%s:%d, Unable to read ON cmds", __func__, __LINE__);
+		goto error;
+	}
+
+	on_cmds = kzalloc(sizeof(char) * len, GFP_KERNEL);
+	if (!on_cmds)
+		return -ENOMEM;
+
+	memcpy(on_cmds, data, len);
+
+	data_offset = 0;
+	cmd_plen = 0;
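+	/* first pass: count complete command entries in the blob */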
+	while ((len - data_offset) >= DT_CMD_HDR) {
+		data_offset += (DT_CMD_HDR - 1);
+		cmd_plen = on_cmds[data_offset++];
+		data_offset += cmd_plen;
+		num_of_on_cmds++;
+	}
+	if (!num_of_on_cmds) {
+		pr_err("%s:%d, No ON cmds specified", __func__, __LINE__);
+		goto error;
+	}
+
+	dsi_panel_on_cmds =
+		kzalloc((num_of_on_cmds * sizeof(struct dsi_cmd_desc)),
+						GFP_KERNEL);
+	if (!dsi_panel_on_cmds)
+		return -ENOMEM;
+
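+	/* second pass: build dsi_cmd_desc entries pointing into the copied blob */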
+	data_offset = 0;
+	for (i = 0; i < num_of_on_cmds; i++) {
+		dsi_panel_on_cmds[i].dtype = on_cmds[data_offset++];
+		dsi_panel_on_cmds[i].last = on_cmds[data_offset++];
+		dsi_panel_on_cmds[i].vc = on_cmds[data_offset++];
+		dsi_panel_on_cmds[i].ack = on_cmds[data_offset++];
+		dsi_panel_on_cmds[i].wait = on_cmds[data_offset++];
+		dsi_panel_on_cmds[i].dlen = on_cmds[data_offset++];
+		dsi_panel_on_cmds[i].payload = &on_cmds[data_offset];
+		data_offset += (dsi_panel_on_cmds[i].dlen);
+	}
+
+	if (data_offset != len) {
+		pr_err("%s:%d, Incorrect ON command entries",
+						__func__, __LINE__);
+		goto error;
+	}
+
+	data = of_get_property(np, "qcom,panel-off-cmds", &len);
+	if (!data) {
+		pr_err("%s:%d, Unable to read OFF cmds", __func__, __LINE__);
+		goto error;
+	}
+
+	off_cmds = kzalloc(sizeof(char) * len, GFP_KERNEL);
+	if (!off_cmds)
+		return -ENOMEM;
+
+	memcpy(off_cmds, data, len);
+
+	data_offset = 0;
+	cmd_plen = 0;
+	while ((len - data_offset) >= DT_CMD_HDR) {
+		data_offset += (DT_CMD_HDR - 1);
+		cmd_plen = off_cmds[data_offset++];
+		data_offset += cmd_plen;
+		num_of_off_cmds++;
+	}
+	if (!num_of_off_cmds) {
+		pr_err("%s:%d, No OFF cmds specified", __func__, __LINE__);
+		goto error;
+	}
+
+	dsi_panel_off_cmds = kzalloc(num_of_off_cmds
+				* sizeof(struct dsi_cmd_desc),
+					GFP_KERNEL);
+	if (!dsi_panel_off_cmds)
+		return -ENOMEM;
+
+	data_offset = 0;
+	for (i = 0; i < num_of_off_cmds; i++) {
+		dsi_panel_off_cmds[i].dtype = off_cmds[data_offset++];
+		dsi_panel_off_cmds[i].last = off_cmds[data_offset++];
+		dsi_panel_off_cmds[i].vc = off_cmds[data_offset++];
+		dsi_panel_off_cmds[i].ack = off_cmds[data_offset++];
+		dsi_panel_off_cmds[i].wait = off_cmds[data_offset++];
+		dsi_panel_off_cmds[i].dlen = off_cmds[data_offset++];
+		dsi_panel_off_cmds[i].payload = &off_cmds[data_offset];
+		data_offset += (dsi_panel_off_cmds[i].dlen);
+	}
+
+	if (data_offset != len) {
+		pr_err("%s:%d, Incorrect OFF command entries",
+						__func__, __LINE__);
+		goto error;
+	}
+
+	return 0;
+error:
+	kfree(dsi_panel_on_cmds);
+	kfree(dsi_panel_off_cmds);
+	kfree(on_cmds);
+	kfree(off_cmds);
+
+	return -EINVAL;
+}
+
+static int __devinit mdss_dsi_panel_probe(struct platform_device *pdev)
+{
+	int rc = 0;
+	struct mdss_panel_common_pdata *vendor_pdata = NULL;
+	static const char *panel_name;
+
+	if (pdev->dev.parent == NULL) {
+		pr_err("%s: parent device missing\n", __func__);
+		return -ENODEV;
+	}
+
+	pr_debug("%s:%d, debug info id=%d", __func__, __LINE__, pdev->id);
+	if (!pdev->dev.of_node)
+		return -ENODEV;
+
+	panel_name = of_get_property(pdev->dev.of_node, "label", NULL);
+	if (!panel_name)
+		pr_info("%s:%d, panel name not specified\n",
+						__func__, __LINE__);
+	else
+		pr_info("%s: Panel Name = %s\n", __func__, panel_name);
+
+	vendor_pdata = devm_kzalloc(&pdev->dev,
+			sizeof(*vendor_pdata), GFP_KERNEL);
+	if (!vendor_pdata)
+		return -ENOMEM;
+
+	rc = mdss_panel_parse_dt(pdev, vendor_pdata);
+	if (rc) {
+		devm_kfree(&pdev->dev, vendor_pdata);
+		vendor_pdata = NULL;
+		return rc;
+	}
+	vendor_pdata->on = mdss_dsi_panel_on;
+	vendor_pdata->off = mdss_dsi_panel_off;
+
+	rc = dsi_panel_device_register(pdev, vendor_pdata);
+	if (rc)
+		return rc;
+
+	return 0;
+}
+
+static const struct of_device_id mdss_dsi_panel_match[] = {
+	{.compatible = "qcom,mdss-dsi-panel"},
+	{}
+};
+
+static struct platform_driver this_driver = {
+	.probe  = mdss_dsi_panel_probe,
+	.driver = {
+		.name   = "dsi_panel",
+		.of_match_table = mdss_dsi_panel_match,
+	},
+};
+
+static int __init mdss_dsi_panel_init(void)
+{
+	mdss_dsi_buf_alloc(&dsi_panel_tx_buf, DSI_BUF_SIZE);
+	mdss_dsi_buf_alloc(&dsi_panel_rx_buf, DSI_BUF_SIZE);
+
+	return platform_driver_register(&this_driver);
+}
+module_init(mdss_dsi_panel_init);
diff --git a/drivers/video/msm/mdss/mdss_mdp_hwio.h b/drivers/video/msm/mdss/mdss_mdp_hwio.h
index 4ca1dce..8825cc6 100644
--- a/drivers/video/msm/mdss/mdss_mdp_hwio.h
+++ b/drivers/video/msm/mdss/mdss_mdp_hwio.h
@@ -375,6 +375,7 @@
 #define MDSS_MDP_REG_INTF_FRAME_LINE_COUNT_EN		0x0A8
 #define MDSS_MDP_REG_INTF_FRAME_COUNT			0x0AC
 #define MDSS_MDP_REG_INTF_LINE_COUNT			0x0B0
+#define MDSS_MDP_PANEL_FORMAT_RGB888			0x213F
 
 enum mdss_mdp_pingpong_index {
 	MDSS_MDP_PINGPONG0,
diff --git a/drivers/video/msm/mdss/mdss_mdp_intf_video.c b/drivers/video/msm/mdss/mdss_mdp_intf_video.c
index 21ef290..2f0a1f5 100644
--- a/drivers/video/msm/mdss/mdss_mdp_intf_video.c
+++ b/drivers/video/msm/mdss/mdss_mdp_intf_video.c
@@ -135,6 +135,8 @@
 			   p->hsync_skew);
 	MDSS_MDP_REG_WRITE(off + MDSS_MDP_REG_INTF_POLARITY_CTL,
 			   polarity_ctl);
+	MDSS_MDP_REG_WRITE(off + MDSS_MDP_REG_INTF_PANEL_FORMAT,
+			   MDSS_MDP_PANEL_FORMAT_RGB888);
 
 	return 0;
 }
@@ -297,14 +299,14 @@
 	itp.underflow_clr = pinfo->lcdc.underflow_clr;
 	itp.hsync_skew = pinfo->lcdc.hsync_skew;
 
-	itp.xres = fbi->var.xres;
-	itp.yres = fbi->var.yres;
-	itp.h_back_porch = fbi->var.left_margin;
-	itp.h_front_porch = fbi->var.right_margin;
-	itp.v_back_porch = fbi->var.upper_margin;
-	itp.v_front_porch = fbi->var.lower_margin;
-	itp.hsync_pulse_width = fbi->var.hsync_len;
-	itp.vsync_pulse_width = fbi->var.vsync_len;
+	itp.xres = pinfo->xres;
+	itp.yres = pinfo->yres;
+	itp.h_back_porch = pinfo->lcdc.h_back_porch;
+	itp.h_front_porch = pinfo->lcdc.h_front_porch;
+	itp.v_back_porch = pinfo->lcdc.v_back_porch;
+	itp.v_front_porch = pinfo->lcdc.v_front_porch;
+	itp.hsync_pulse_width = pinfo->lcdc.h_pulse_width;
+	itp.vsync_pulse_width = pinfo->lcdc.v_pulse_width;
 
 	if (mdss_mdp_video_timegen_setup(ctl, &itp)) {
 		pr_err("unable to get timing parameters\n");
diff --git a/drivers/video/msm/mdss/mdss_panel.h b/drivers/video/msm/mdss/mdss_panel.h
index 0411d8e..3ec3a5d 100644
--- a/drivers/video/msm/mdss/mdss_panel.h
+++ b/drivers/video/msm/mdss/mdss_panel.h
@@ -167,6 +167,7 @@
 struct mdss_panel_data {
 	struct mdss_panel_info panel_info;
 	void (*set_backlight) (u32 bl_level);
+	unsigned char *dsi_base;
 
 	/* function entry chain */
 	int (*on) (struct mdss_panel_data *pdata);
diff --git a/drivers/video/msm/mdss/msm_mdss_io_8974.c b/drivers/video/msm/mdss/msm_mdss_io_8974.c
new file mode 100644
index 0000000..c766ec7
--- /dev/null
+++ b/drivers/video/msm/mdss/msm_mdss_io_8974.c
@@ -0,0 +1,199 @@
+/* Copyright (c) 2012, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+#include <linux/clk.h>
+#include <linux/interrupt.h>
+#include <linux/err.h>
+#include <linux/io.h>
+
+#include <mach/clk.h>
+#include <mach/msm_iomap.h>
+
+#include "mdss_dsi.h"
+
+#define SW_RESET BIT(2)
+#define SW_RESET_PLL BIT(0)
+#define PWRDN_B BIT(7)
+
+static struct dsi_clk_desc dsi_pclk;
+
+static struct clk *dsi_byte_div_clk;
+static struct clk *dsi_esc_clk;
+
+int mdss_dsi_clk_on;
+
+int mdss_dsi_clk_init(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+
+	dsi_byte_div_clk = clk_get(dev, "byte_clk");
+	if (IS_ERR(dsi_byte_div_clk)) {
+		pr_err("can't find dsi_byte_div_clk\n");
+		dsi_byte_div_clk = NULL;
+		goto mdss_dsi_clk_err;
+	}
+
+	dsi_esc_clk = clk_get(dev, "core_clk");
+	if (IS_ERR(dsi_esc_clk)) {
+		pr_err("can't find dsi_esc_clk\n");
+		dsi_esc_clk = NULL;
+		goto mdss_dsi_clk_err;
+	}
+
+	return 0;
+
+mdss_dsi_clk_err:
+	mdss_dsi_clk_deinit(dev);
+	return -EPERM;
+}
+
+void mdss_dsi_clk_deinit(struct device *dev)
+{
+	if (dsi_byte_div_clk)
+		clk_put(dsi_byte_div_clk);
+	if (dsi_esc_clk)
+		clk_put(dsi_esc_clk);
+}
+
+#define PREF_DIV_RATIO 27
+struct dsiphy_pll_divider_config pll_divider_config;
+
+int mdss_dsi_clk_div_config(u8 bpp, u8 lanes,
+			    u32 *expected_dsi_pclk)
+{
+	u32 fb_divider, rate, vco;
+	u32 div_ratio = 0;
+	u32 pll_analog_posDiv = 1;
+	struct dsi_clk_mnd_table const *mnd_entry = mnd_table;
+
+	if (pll_divider_config.clk_rate == 0)
+		pll_divider_config.clk_rate = 454000000;
+
+	rate = (pll_divider_config.clk_rate / 2)
+			 / 1000000; /* Half Bit Clock In Mhz */
+
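+	/*
+	 * pick the bit clock divide ratio and analog post divider from the
+	 * half bit clock: slower link rates use a larger divide ratio and
+	 * analog post divider (vco = rate * div_ratio); the fastest band
+	 * takes the direct PLL path.
+	 */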
+	if (rate < 43) {
+		vco = rate * 16;
+		div_ratio = 16;
+		pll_analog_posDiv = 8;
+	} else if (rate < 85) {
+		vco = rate * 8;
+		div_ratio = 8;
+		pll_analog_posDiv = 4;
+	} else if (rate < 170) {
+		vco = rate * 4;
+		div_ratio = 4;
+		pll_analog_posDiv = 2;
+	} else if (rate < 340) {
+		vco = rate * 2;
+		div_ratio = 2;
+		pll_analog_posDiv = 1;
+	} else {
+		/* DSI PLL Direct path configuration */
+		vco = rate * 1;
+		div_ratio = 1;
+		pll_analog_posDiv = 1;
+	}
+
+	/* find the mnd settings from mnd_table entry */
+	for (; mnd_entry != mnd_table + ARRAY_SIZE(mnd_table); ++mnd_entry) {
+		if (((mnd_entry->lanes) == lanes) &&
+			((mnd_entry->bpp) == bpp))
+			break;
+	}
+
+	if (mnd_entry == mnd_table + ARRAY_SIZE(mnd_table)) {
+		pr_err("%s: requested Lanes, %u & BPP, %u, not supported\n",
+			__func__, lanes, bpp);
+		return -EINVAL;
+	}
+	fb_divider = ((vco * PREF_DIV_RATIO) / 27);
+	pll_divider_config.fb_divider = fb_divider;
+	pll_divider_config.ref_divider_ratio = PREF_DIV_RATIO;
+	pll_divider_config.bit_clk_divider = div_ratio;
+	pll_divider_config.byte_clk_divider =
+			pll_divider_config.bit_clk_divider * 8;
+	pll_divider_config.analog_posDiv = pll_analog_posDiv;
+	pll_divider_config.digital_posDiv =
+			(mnd_entry->pll_digital_posDiv) * div_ratio;
+
+	if ((mnd_entry->pclk_d == 0)
+		|| (mnd_entry->pclk_m == 1)) {
+		dsi_pclk.mnd_mode = 0;
+		dsi_pclk.src = 0x3;
+		dsi_pclk.pre_div_func = (mnd_entry->pclk_n - 1);
+	} else {
+		dsi_pclk.mnd_mode = 2;
+		dsi_pclk.src = 0x3;
+		dsi_pclk.m = mnd_entry->pclk_m;
+		dsi_pclk.n = mnd_entry->pclk_n;
+		dsi_pclk.d = mnd_entry->pclk_d;
+	}
+	*expected_dsi_pclk = (((pll_divider_config.clk_rate) * lanes)
+				      / (8 * bpp));
+
+	return 0;
+}
+
+void cont_splash_clk_ctrl(int enable)
+{
+	static int cont_splash_clks_enabled;
+
+	if (enable && !cont_splash_clks_enabled) {
+		clk_prepare_enable(dsi_byte_div_clk);
+		clk_prepare_enable(dsi_esc_clk);
+		cont_splash_clks_enabled = 1;
+	} else if (!enable && cont_splash_clks_enabled) {
+		clk_disable_unprepare(dsi_byte_div_clk);
+		clk_disable_unprepare(dsi_esc_clk);
+		cont_splash_clks_enabled = 0;
+	}
+}
+
+void mdss_dsi_prepare_clocks(void)
+{
+	clk_prepare(dsi_byte_div_clk);
+	clk_prepare(dsi_esc_clk);
+}
+
+void mdss_dsi_unprepare_clocks(void)
+{
+	clk_unprepare(dsi_esc_clk);
+	clk_unprepare(dsi_byte_div_clk);
+}
+
+void mdss_dsi_clk_enable(void)
+{
+	if (mdss_dsi_clk_on) {
+		pr_info("%s: mdss_dsi_clks already ON\n", __func__);
+		return;
+	}
+
+	if (clk_set_rate(dsi_byte_div_clk, 1) < 0)	/* divided by 1 */
+		pr_err("%s: dsi_byte_div_clk - clk_set_rate failed\n",
+					__func__);
+	if (clk_set_rate(dsi_esc_clk, 2) < 0) /* divided by 2 */
+		pr_err("%s: dsi_esc_clk - clk_set_rate failed\n",
+					__func__);
+	clk_enable(dsi_byte_div_clk);
+	clk_enable(dsi_esc_clk);
+	mdss_dsi_clk_on = 1;
+}
+
+void mdss_dsi_clk_disable(void)
+{
+	if (mdss_dsi_clk_on == 0) {
+		pr_info("%s: mdss_dsi_clks already OFF\n", __func__);
+		return;
+	}
+	clk_disable(dsi_esc_clk);
+	clk_disable(dsi_byte_div_clk);
+	mdss_dsi_clk_on = 0;
+}
diff --git a/drivers/video/msm/msm_fb.c b/drivers/video/msm/msm_fb.c
index 72e3600..a984637 100644
--- a/drivers/video/msm/msm_fb.c
+++ b/drivers/video/msm/msm_fb.c
@@ -1438,10 +1438,22 @@
 
 	/* cursor memory allocation */
 	if (mfd->cursor_update) {
+		unsigned long cursor_buf_iommu = 0;
 		mfd->cursor_buf = dma_alloc_coherent(NULL,
 					MDP_CURSOR_SIZE,
 					(dma_addr_t *) &mfd->cursor_buf_phys,
 					GFP_KERNEL);
+
+		msm_iommu_map_contig_buffer((unsigned long)mfd->cursor_buf_phys,
+					    DISPLAY_READ_DOMAIN,
+					    GEN_POOL,
+					    MDP_CURSOR_SIZE,
+					    SZ_4K,
+					    0,
+					    &cursor_buf_iommu);
+		if (cursor_buf_iommu)
+			mfd->cursor_buf_phys = (void *)cursor_buf_iommu;
+
 		if (!mfd->cursor_buf)
 			mfd->cursor_update = 0;
 	}
@@ -1483,7 +1495,10 @@
 	ret = 0;
 
 #ifdef CONFIG_HAS_EARLYSUSPEND
-	if (hdmi_prim_display || mfd->panel_info.type != DTV_PANEL) {
+
+	if (hdmi_prim_display ||
+	    (mfd->panel_info.type != DTV_PANEL &&
+	     mfd->panel_info.type != WRITEBACK_PANEL)) {
 		mfd->early_suspend.suspend = msmfb_early_suspend;
 		mfd->early_suspend.resume = msmfb_early_resume;
 		mfd->early_suspend.level = EARLY_SUSPEND_LEVEL_DISABLE_FB - 2;
diff --git a/drivers/video/msm/vidc/1080p/ddl/vcd_ddl_interrupt_handler.c b/drivers/video/msm/vidc/1080p/ddl/vcd_ddl_interrupt_handler.c
index 22eaf4f..72fe2e3 100644
--- a/drivers/video/msm/vidc/1080p/ddl/vcd_ddl_interrupt_handler.c
+++ b/drivers/video/msm/vidc/1080p/ddl/vcd_ddl_interrupt_handler.c
@@ -1302,6 +1302,7 @@
 		ddl_process_decoder_metadata(ddl);
 		vidc_sm_get_aspect_ratio_info(
 			&ddl->shared_mem[ddl->command_channel],
+			decoder->codec.codec,
 			&output_vcd_frm->aspect_ratio_info);
 		ddl_context->ddl_callback(VCD_EVT_RESP_OUTPUT_DONE,
 			vcd_status, output_frame,
diff --git a/drivers/video/msm/vidc/1080p/ddl/vcd_ddl_shared_mem.c b/drivers/video/msm/vidc/1080p/ddl/vcd_ddl_shared_mem.c
index 839a9c1..d45de2d 100644
--- a/drivers/video/msm/vidc/1080p/ddl/vcd_ddl_shared_mem.c
+++ b/drivers/video/msm/vidc/1080p/ddl/vcd_ddl_shared_mem.c
@@ -205,6 +205,10 @@
 #define VIDC_SM_ASPECT_RATIO_INFO_ADDR               0x00c8
 #define VIDC_SM_MPEG4_ASPECT_RATIO_INFO_BMSK         0xf
 #define VIDC_SM_MPEG4_ASPECT_RATIO_INFO_SHFT         0x0
+#define VIDC_SM_MPEG2_ASPECT_RATIO_INFO_BMSK         0x000f0000
+#define VIDC_SM_MPEG2_ASPECT_RATIO_INFO_SHFT         16
+#define VIDC_SM_H264_ASPECT_RATIO_INFO_BMSK          0x00000ff0
+#define VIDC_SM_H264_ASPECT_RATIO_INFO_SHFT          4
 #define VIDC_SM_EXTENDED_PAR_ADDR                    0x00cc
 #define VIDC_SM_EXTENDED_PAR_WIDTH_BMSK              0xffff0000
 #define VIDC_SM_EXTENDED_PAR_WIDTH_SHFT              16
@@ -802,23 +806,160 @@
 }
 
 void vidc_sm_get_aspect_ratio_info(struct ddl_buf_addr *shared_mem,
-	struct vcd_aspect_ratio *aspect_ratio_info)
+	enum vcd_codec codec, struct vcd_aspect_ratio *aspect_ratio_info)
 {
-	u32 extended_par_info = 0;
-	aspect_ratio_info->aspect_ratio = DDL_MEM_READ_32(shared_mem,
+	u32 extended_par_info = 0, aspect_ratio = 0;
+
+	aspect_ratio = DDL_MEM_READ_32(shared_mem,
 				VIDC_SM_ASPECT_RATIO_INFO_ADDR);
 
-	if (aspect_ratio_info->aspect_ratio == 0x0f) {
-		extended_par_info = DDL_MEM_READ_32(shared_mem,
-			VIDC_SM_EXTENDED_PAR_ADDR);
-		aspect_ratio_info->extended_par_width =
-			VIDC_GETFIELD(extended_par_info,
-			VIDC_SM_EXTENDED_PAR_WIDTH_BMSK,
-			VIDC_SM_EXTENDED_PAR_WIDTH_SHFT);
-		aspect_ratio_info->extended_par_height =
-			VIDC_GETFIELD(extended_par_info,
-			VIDC_SM_EXTENDED_PAR_HEIGHT_BMSK,
-			VIDC_SM_EXTENDED_PAR_HEIGHT_SHFT);
+	if (codec == VCD_CODEC_H264) {
+		aspect_ratio_info->aspect_ratio =
+			VIDC_GETFIELD(aspect_ratio,
+			VIDC_SM_H264_ASPECT_RATIO_INFO_BMSK,
+			VIDC_SM_H264_ASPECT_RATIO_INFO_SHFT);
+
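+		/*
+		 * aspect_ratio values follow the H.264 aspect_ratio_idc
+		 * table (Annex E); 255 means Extended_SAR, with the PAR
+		 * read from the extended PAR registers.
+		 */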
+		switch (aspect_ratio_info->aspect_ratio) {
+		case 1:
+			aspect_ratio_info->par_width    = 1;
+			aspect_ratio_info->par_height   = 1;
+			break;
+		case 2:
+			aspect_ratio_info->par_width    = 12;
+			aspect_ratio_info->par_height   = 11;
+			break;
+		case 3:
+			aspect_ratio_info->par_width    = 10;
+			aspect_ratio_info->par_height   = 11;
+			break;
+		case 4:
+			aspect_ratio_info->par_width    = 16;
+			aspect_ratio_info->par_height   = 11;
+			break;
+		case 5:
+			aspect_ratio_info->par_width    = 40;
+			aspect_ratio_info->par_height   = 33;
+			break;
+		case 6:
+			aspect_ratio_info->par_width    = 24;
+			aspect_ratio_info->par_height   = 11;
+			break;
+		case 7:
+			aspect_ratio_info->par_width    = 20;
+			aspect_ratio_info->par_height   = 11;
+			break;
+		case 8:
+			aspect_ratio_info->par_width    = 32;
+			aspect_ratio_info->par_height   = 11;
+			break;
+		case 9:
+			aspect_ratio_info->par_width    = 80;
+			aspect_ratio_info->par_height   = 33;
+			break;
+		case 10:
+			aspect_ratio_info->par_width    = 18;
+			aspect_ratio_info->par_height   = 11;
+			break;
+		case 11:
+			aspect_ratio_info->par_width    = 15;
+			aspect_ratio_info->par_height   = 11;
+			break;
+		case 12:
+			aspect_ratio_info->par_width    = 64;
+			aspect_ratio_info->par_height   = 33;
+			break;
+		case 13:
+			aspect_ratio_info->par_width    = 160;
+			aspect_ratio_info->par_height   = 99;
+			break;
+		case 14:
+			aspect_ratio_info->par_width    = 4;
+			aspect_ratio_info->par_height   = 3;
+			break;
+		case 15:
+			aspect_ratio_info->par_width    = 3;
+			aspect_ratio_info->par_height   = 2;
+			break;
+		case 16:
+			aspect_ratio_info->par_width    = 2;
+			aspect_ratio_info->par_height   = 1;
+			break;
+		case 255:
+			extended_par_info = DDL_MEM_READ_32(shared_mem,
+				VIDC_SM_EXTENDED_PAR_ADDR);
+			aspect_ratio_info->par_width =
+				VIDC_GETFIELD(extended_par_info,
+				VIDC_SM_EXTENDED_PAR_WIDTH_BMSK,
+				VIDC_SM_EXTENDED_PAR_WIDTH_SHFT);
+			aspect_ratio_info->par_height =
+				VIDC_GETFIELD(extended_par_info,
+				VIDC_SM_EXTENDED_PAR_HEIGHT_BMSK,
+				VIDC_SM_EXTENDED_PAR_HEIGHT_SHFT);
+			break;
+		default:
+			DDL_MSG_ERROR("Incorrect Aspect Ratio.");
+			aspect_ratio_info->par_width    = 1;
+			aspect_ratio_info->par_height   = 1;
+			break;
+		}
+	} else if ((codec == VCD_CODEC_MPEG4) ||
+		(codec == VCD_CODEC_DIVX_4) ||
+		(codec == VCD_CODEC_DIVX_5) ||
+		(codec == VCD_CODEC_DIVX_6) ||
+		(codec == VCD_CODEC_XVID) ||
+		(codec == VCD_CODEC_MPEG2)) {
+
+		if (codec == VCD_CODEC_MPEG2) {
+			aspect_ratio_info->aspect_ratio =
+				VIDC_GETFIELD(aspect_ratio,
+				VIDC_SM_MPEG2_ASPECT_RATIO_INFO_BMSK,
+				VIDC_SM_MPEG2_ASPECT_RATIO_INFO_SHFT);
+		} else {
+			aspect_ratio_info->aspect_ratio =
+				VIDC_GETFIELD(aspect_ratio,
+				VIDC_SM_MPEG4_ASPECT_RATIO_INFO_BMSK,
+				VIDC_SM_MPEG4_ASPECT_RATIO_INFO_SHFT);
+		}
+
+		switch (aspect_ratio_info->aspect_ratio) {
+		case 1:
+			aspect_ratio_info->par_width    = 1;
+			aspect_ratio_info->par_height   = 1;
+			break;
+		case 2:
+			aspect_ratio_info->par_width    = 12;
+			aspect_ratio_info->par_height   = 11;
+			break;
+		case 3:
+			aspect_ratio_info->par_width    = 10;
+			aspect_ratio_info->par_height   = 11;
+			break;
+		case 4:
+			aspect_ratio_info->par_width    = 16;
+			aspect_ratio_info->par_height   = 11;
+			break;
+		case 5:
+			aspect_ratio_info->par_width    = 40;
+			aspect_ratio_info->par_height   = 33;
+			break;
+		case 15:
+			extended_par_info = DDL_MEM_READ_32(shared_mem,
+				VIDC_SM_EXTENDED_PAR_ADDR);
+			aspect_ratio_info->par_width =
+				VIDC_GETFIELD(extended_par_info,
+				VIDC_SM_EXTENDED_PAR_WIDTH_BMSK,
+				VIDC_SM_EXTENDED_PAR_WIDTH_SHFT);
+			aspect_ratio_info->par_height =
+				VIDC_GETFIELD(extended_par_info,
+				VIDC_SM_EXTENDED_PAR_HEIGHT_BMSK,
+				VIDC_SM_EXTENDED_PAR_HEIGHT_SHFT);
+			break;
+		default:
+			DDL_MSG_ERROR("Incorrect Aspect Ratio.");
+			aspect_ratio_info->par_width    = 1;
+			aspect_ratio_info->par_height   = 1;
+			break;
+		}
 	}
 }
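For reference, the H.264 branch above is a direct transcription of the aspect_ratio_idc to sample-aspect-ratio table (indices 1-16, with 255 selecting the extended PAR words read from shared memory). Below is a standalone, table-driven sketch of the same mapping; it reuses the exact values from the switch statement, but the names h264_par_table and lookup_h264_par are made up for illustration and are not part of the patch.

	#include <stdio.h>

	struct par {
		unsigned int width;
		unsigned int height;
	};

	/* Same index -> PAR values as the H.264 switch in the hunk above;
	 * index 0 and 17..254 are unused, 255 means "extended PAR". */
	static const struct par h264_par_table[] = {
		[1]  = {1, 1},    [2]  = {12, 11},  [3]  = {10, 11},  [4]  = {16, 11},
		[5]  = {40, 33},  [6]  = {24, 11},  [7]  = {20, 11},  [8]  = {32, 11},
		[9]  = {80, 33},  [10] = {18, 11},  [11] = {15, 11},  [12] = {64, 33},
		[13] = {160, 99}, [14] = {4, 3},    [15] = {3, 2},    [16] = {2, 1},
	};

	/* Returns 1 and fills *out for indices 1..16; returns 0 otherwise so
	 * the caller can fall back to the extended-PAR read or a 1:1 default. */
	static int lookup_h264_par(unsigned int idx, struct par *out)
	{
		if (idx >= 1 && idx <= 16) {
			*out = h264_par_table[idx];
			return 1;
		}
		return 0;
	}

	int main(void)
	{
		struct par p;

		if (lookup_h264_par(4, &p))
			printf("aspect_ratio 4 -> PAR %u:%u\n", p.width, p.height);
		return 0;
	}

A table keeps the index-to-PAR data in one place; the driver above expresses the same data as per-codec switch statements inside the shared-memory helper.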
 
diff --git a/drivers/video/msm/vidc/1080p/ddl/vcd_ddl_shared_mem.h b/drivers/video/msm/vidc/1080p/ddl/vcd_ddl_shared_mem.h
index 6cd75595..1a46c36 100644
--- a/drivers/video/msm/vidc/1080p/ddl/vcd_ddl_shared_mem.h
+++ b/drivers/video/msm/vidc/1080p/ddl/vcd_ddl_shared_mem.h
@@ -180,7 +180,7 @@
 	struct ddl_buf_addr *shared_mem,
 	enum vidc_sm_num_stuff_bytes_consume_info consume_info);
 void vidc_sm_get_aspect_ratio_info(struct ddl_buf_addr *shared_mem,
-	struct vcd_aspect_ratio *aspect_ratio_info);
+	enum vcd_codec codec, struct vcd_aspect_ratio *aspect_ratio_info);
 void vidc_sm_set_encoder_slice_batch_int_ctrl(struct ddl_buf_addr *shared_mem,
 	u32 slice_batch_int_enable);
 void vidc_sm_get_num_slices_comp(struct ddl_buf_addr *shared_mem,
diff --git a/drivers/video/msm/vidc/720p/ddl/vcd_ddl.c b/drivers/video/msm/vidc/720p/ddl/vcd_ddl.c
index 02b2369..a144e06 100644
--- a/drivers/video/msm/vidc/720p/ddl/vcd_ddl.c
+++ b/drivers/video/msm/vidc/720p/ddl/vcd_ddl.c
@@ -42,8 +42,17 @@
 	}
 
 	DDL_MEMSET(ddl_context, 0, sizeof(struct ddl_context));
-
 	DDL_BUSY(ddl_context);
+
+	if (res_trk_get_enable_ion()) {
+		VIDC_LOGERR_STRING("ddl_dev_init: ION framework enabled");
+		ddl_context->video_ion_client  =
+			res_trk_get_ion_client();
+		if (!ddl_context->video_ion_client) {
+			VIDC_LOGERR_STRING("ION client create failed");
+			return VCD_ERR_ILLEGAL_OP;
+		}
+	}
 	ddl_context->memtype = res_trk_get_mem_type();
 	if (ddl_context->memtype == -1) {
 		VIDC_LOGERR_STRING("ddl_dev_init:Invalid Memtype");
@@ -161,7 +170,7 @@
 
 	VIDC_LOG_STRING("FW_ENDDONE");
 	ddl_release_context_buffers(ddl_context);
-
+	ddl_context->video_ion_client = NULL;
 	DDL_IDLE(ddl_context);
 
 	return VCD_S_SUCCESS;
diff --git a/drivers/video/msm/vidc/720p/ddl/vcd_ddl.h b/drivers/video/msm/vidc/720p/ddl/vcd_ddl.h
index e1407c8..e6d3527 100644
--- a/drivers/video/msm/vidc/720p/ddl/vcd_ddl.h
+++ b/drivers/video/msm/vidc/720p/ddl/vcd_ddl.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2010, Code Aurora Forum. All rights reserved.
+/* Copyright (c) 2010-2012 Code Aurora Forum. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -77,6 +77,7 @@
 	u32 *align_physical_addr;
 	u32 *align_virtual_addr;
 	struct msm_mapped_buffer *mapped_buffer;
+	struct ion_handle *alloc_handle;
 	u32 buffer_size;
 	enum ddl_mem_area mem_type;
 };
@@ -225,6 +226,7 @@
 	struct ddl_buf_addr dbg_core_dump;
 	u32 enable_dbg_core_dump;
 	struct ddl_client_context *ddl_clients[VCD_MAX_NO_CLIENT];
+	struct ion_client *video_ion_client;
 	u32 device_state;
 	u32 ddl_busy;
 	u32  intr_status;
diff --git a/drivers/video/msm/vidc/720p/ddl/vcd_ddl_utils.c b/drivers/video/msm/vidc/720p/ddl/vcd_ddl_utils.c
index aa0d4b8..21f01d1 100644
--- a/drivers/video/msm/vidc/720p/ddl/vcd_ddl_utils.c
+++ b/drivers/video/msm/vidc/720p/ddl/vcd_ddl_utils.c
@@ -13,6 +13,7 @@
 #include <linux/memory_alloc.h>
 #include <media/msm/vidc_type.h>
 #include "vcd_ddl_utils.h"
+#include "vcd_res_tracker_api.h"
 
 #if DEBUG
 #define DBG(x...) printk(KERN_DEBUG x)
@@ -91,103 +92,178 @@
 	u32 alloc_size, flags = 0;
 	struct ddl_context *ddl_context;
 	struct msm_mapped_buffer *mapped_buffer = NULL;
+	unsigned long *kernel_vaddr = NULL;
+	ion_phys_addr_t phyaddr = 0;
+	size_t len = 0;
+	int ret = -EINVAL;
 
 	if (!buff_addr) {
-		ERR("\n%s() Invalid Parameters", __func__);
+		ERR("\n%s() Invalid Parameters\n", __func__);
 		return;
 	}
-
-	DBG_PMEM("\n%s() IN: Requested alloc size(%u)", __func__, (u32)sz);
-
 	if (align == DDL_LINEAR_BUFFER_ALIGN_BYTES) {
-
 		guard_bytes = 31;
 		align_mask = 0xFFFFFFE0U;
-
 	} else {
-
 		guard_bytes = DDL_TILE_BUF_ALIGN_GUARD_BYTES;
 		align_mask = DDL_TILE_BUF_ALIGN_MASK;
 	}
 	ddl_context = ddl_get_context();
 	alloc_size = sz + guard_bytes;
+	if (res_trk_get_enable_ion()) {
+		if (!ddl_context->video_ion_client)
+			ddl_context->video_ion_client =
+				res_trk_get_ion_client();
+		if (!ddl_context->video_ion_client) {
+			ERR("\n%s(): DDL ION Client Invalid handle\n",
+				__func__);
+			goto bailout;
+		}
+		buff_addr->mem_type = res_trk_get_mem_type();
+		buff_addr->alloc_handle = ion_alloc(
+					ddl_context->video_ion_client,
+					alloc_size,
+					SZ_4K,
+					buff_addr->mem_type);
+		if (!buff_addr->alloc_handle) {
+			ERR("\n%s(): DDL ION alloc failed\n",
+					__func__);
+			goto bailout;
+		}
+		ret = ion_phys(ddl_context->video_ion_client,
+					buff_addr->alloc_handle,
+					&phyaddr,
+					&len);
+		if (ret || !phyaddr) {
+			ERR("\n%s(): DDL ION client physical failed\n",
+					__func__);
+			goto free_ion_buffer;
+		}
+		buff_addr->physical_base_addr = (u32 *)phyaddr;
+		kernel_vaddr = (unsigned long *) ion_map_kernel(
+					ddl_context->video_ion_client,
+					buff_addr->alloc_handle,
+					UNCACHED);
+		if (IS_ERR_OR_NULL(kernel_vaddr)) {
+			ERR("\n%s(): DDL ION map failed\n", __func__);
+			goto free_ion_buffer;
+		}
+		buff_addr->virtual_base_addr = (u32 *)kernel_vaddr;
+		DBG("ddl_ion_alloc: handle(0x%x), mem_type(0x%x), "\
+			"phys(0x%x), virt(0x%x), size(%u), align(%u), "\
+			"alloced_len(%u)", (u32)buff_addr->alloc_handle,
+			(u32)buff_addr->mem_type,
+			(u32)buff_addr->physical_base_addr,
+			(u32)buff_addr->virtual_base_addr,
+			alloc_size, align, len);
+	} else {
+		physical_addr = (u32)
+			allocate_contiguous_memory_nomap(alloc_size,
+						ddl_context->memtype, SZ_4K);
+		if (!physical_addr) {
+			ERR("\n%s(): DDL pmem allocate failed\n",
+			       __func__);
+			goto bailout;
+		}
+		buff_addr->physical_base_addr = (u32 *) physical_addr;
+		flags = MSM_SUBSYSTEM_MAP_KADDR;
+		buff_addr->mapped_buffer =
+		msm_subsystem_map_buffer((unsigned long)physical_addr,
+		alloc_size, flags, NULL, 0);
+		if (IS_ERR(buff_addr->mapped_buffer)) {
+			ERR("\n%s() buffer map failed\n", __func__);
+			goto free_pmem_buffer;
+		}
+		mapped_buffer = buff_addr->mapped_buffer;
+		if (!mapped_buffer->vaddr) {
+			ERR("\n%s() mapped virtual address is NULL\n",
+				__func__);
+			goto unmap_pmem_buffer;
+		}
+		buff_addr->virtual_base_addr = mapped_buffer->vaddr;
+		DBG("ddl_pmem_alloc: mem_type(0x%x), phys(0x%x),"\
+			" virt(0x%x), sz(%u), align(%u)",
+			(u32)buff_addr->mem_type,
+			(u32)buff_addr->physical_base_addr,
+			(u32)buff_addr->virtual_base_addr,
+			alloc_size, SZ_4K);
+	}
 
-	physical_addr = (u32)
-		allocate_contiguous_memory_nomap(alloc_size,
-					ddl_context->memtype, SZ_4K);
-
-	if (!physical_addr) {
-		pr_err("%s(): could not allocate kernel pmem buffers\n",
-		       __func__);
-		goto bailout;
-	}
-	buff_addr->physical_base_addr = (u32 *) physical_addr;
-	flags = MSM_SUBSYSTEM_MAP_KADDR;
-	buff_addr->mapped_buffer =
-	msm_subsystem_map_buffer((unsigned long)physical_addr,
-	alloc_size, flags, NULL, 0);
-	if (IS_ERR(buff_addr->mapped_buffer)) {
-		pr_err(" %s() buffer map failed", __func__);
-		goto free_acm_alloc;
-	}
-	mapped_buffer = buff_addr->mapped_buffer;
-	if (!mapped_buffer->vaddr) {
-		pr_err("%s() mapped virtual address is NULL", __func__);
-		goto free_map_buffers;
-	}
-	buff_addr->virtual_base_addr = mapped_buffer->vaddr;
 	memset(buff_addr->virtual_base_addr, 0 , sz + guard_bytes);
 	buff_addr->buffer_size = sz;
-
-	buff_addr->align_physical_addr =
-	    (u32 *) ((physical_addr + guard_bytes) & align_mask);
-
-	align_offset =
-	    (u32) (buff_addr->align_physical_addr) - physical_addr;
-
+	buff_addr->align_physical_addr = (u32 *)
+		(((u32)buff_addr->physical_base_addr + guard_bytes) &
+		align_mask);
+	align_offset = (u32) (buff_addr->align_physical_addr) -
+		(u32)buff_addr->physical_base_addr;
 	buff_addr->align_virtual_addr =
 	    (u32 *) ((u32) (buff_addr->virtual_base_addr)
 		     + align_offset);
-
-	DBG_PMEM("\n%s() OUT: phy_addr(%p) ker_addr(%p) size(%u)", __func__,
-		buff_addr->physical_base_addr, buff_addr->virtual_base_addr,
-		buff_addr->buffer_size);
-
+	DBG("%s(): phys(0x%x) align_phys(0x%x), virt(0x%x),"\
+		" align_virt(0x%x)", __func__,
+		(u32)buff_addr->physical_base_addr,
+		(u32)buff_addr->align_physical_addr,
+		(u32)buff_addr->virtual_base_addr,
+		(u32)buff_addr->align_virtual_addr);
 	return;
-free_map_buffers:
-	msm_subsystem_unmap_buffer(buff_addr->mapped_buffer);
-free_acm_alloc:
-	free_contiguous_memory_by_paddr(
-		(unsigned long) physical_addr);
+
+unmap_pmem_buffer:
+	if (buff_addr->mapped_buffer)
+		msm_subsystem_unmap_buffer(buff_addr->mapped_buffer);
+free_pmem_buffer:
+	if (buff_addr->physical_base_addr)
+		free_contiguous_memory_by_paddr((unsigned long)
+			buff_addr->physical_base_addr);
+	memset(buff_addr, 0, sizeof(struct ddl_buf_addr));
+	return;
+
+unmap_ion_buffer:
+	if (ddl_context->video_ion_client) {
+		if (buff_addr->alloc_handle)
+			ion_unmap_kernel(ddl_context->video_ion_client,
+				buff_addr->alloc_handle);
+	}
+free_ion_buffer:
+	if (ddl_context->video_ion_client) {
+		if (buff_addr->alloc_handle)
+			ion_free(ddl_context->video_ion_client,
+				buff_addr->alloc_handle);
+	}
 bailout:
-	buff_addr->physical_base_addr = NULL;
-	buff_addr->virtual_base_addr = NULL;
-	buff_addr->buffer_size = 0;
-	buff_addr->mapped_buffer = NULL;
+	memset(buff_addr, 0, sizeof(struct ddl_buf_addr));
 }
 
 void ddl_pmem_free(struct ddl_buf_addr *buff_addr)
 {
+	struct ddl_context *ddl_context;
+	ddl_context = ddl_get_context();
 	if (!buff_addr) {
 		ERR("\n %s() invalid arguments %p", __func__, buff_addr);
 		return;
 	}
-	DBG_PMEM("\n%s() IN: phy_addr(%p) ker_addr(%p) size(%u)", __func__,
-		buff_addr->physical_base_addr, buff_addr->virtual_base_addr,
+	DBG("ddl_pmem_free: phys(0x%x) align_phys(0x%x), "\
+		"virt(0x%x), align_virt(0x%x), size(%u)",
+		(u32)buff_addr->physical_base_addr,
+		(u32)buff_addr->align_physical_addr,
+		(u32)buff_addr->virtual_base_addr,
+		(u32)buff_addr->align_virtual_addr,
 		buff_addr->buffer_size);
-
-	if (buff_addr->mapped_buffer)
-		msm_subsystem_unmap_buffer(buff_addr->mapped_buffer);
-	if (buff_addr->physical_base_addr)
-		free_contiguous_memory_by_paddr(
-			(unsigned long) buff_addr->physical_base_addr);
-	DBG_PMEM("\n%s() OUT: phy_addr(%p) ker_addr(%p) size(%u)", __func__,
-		buff_addr->physical_base_addr, buff_addr->virtual_base_addr,
-		buff_addr->buffer_size);
-	buff_addr->buffer_size = 0;
-	buff_addr->physical_base_addr = NULL;
-	buff_addr->virtual_base_addr = NULL;
-	buff_addr->mapped_buffer = NULL;
+	if (ddl_context->video_ion_client) {
+		if (buff_addr->alloc_handle) {
+			ion_unmap_kernel(ddl_context->video_ion_client,
+				buff_addr->alloc_handle);
+			ion_free(ddl_context->video_ion_client,
+				buff_addr->alloc_handle);
+		}
+	} else {
+		if (buff_addr->mapped_buffer)
+			msm_subsystem_unmap_buffer(
+				buff_addr->mapped_buffer);
+		if (buff_addr->physical_base_addr)
+			free_contiguous_memory_by_paddr((unsigned long)
+				buff_addr->physical_base_addr);
+	}
+	memset(buff_addr, 0, sizeof(struct ddl_buf_addr));
 }
 #endif
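The reworked allocation helper above uses the usual goto-based unwind: each failure jumps to a label that releases only what has already been acquired (unmap, then free, then bailout), and the buffer descriptor is zeroed on every error path. A compact userspace sketch of that shape follows; buf_desc, alloc_two and the malloc calls are stand-ins for the ION/pmem resources and are not taken from the driver.

	#include <stdlib.h>
	#include <string.h>

	struct buf_desc {
		void *phys_like;	/* stands in for the contiguous/ION allocation */
		void *virt_like;	/* stands in for the kernel mapping */
	};

	/* Returns 0 on success; on any failure it releases only what was
	 * already acquired and zeroes the descriptor, mirroring the
	 * memset() in the driver's bailout path. */
	static int alloc_two(struct buf_desc *d, size_t sz)
	{
		d->phys_like = malloc(sz);
		if (!d->phys_like)
			goto bailout;

		d->virt_like = malloc(sz);
		if (!d->virt_like)
			goto free_phys;

		return 0;

	free_phys:
		free(d->phys_like);
	bailout:
		memset(d, 0, sizeof(*d));
		return -1;
	}

	int main(void)
	{
		struct buf_desc d;

		if (alloc_two(&d, 64) == 0) {
			free(d.virt_like);
			free(d.phys_like);
		}
		return 0;
	}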
 
diff --git a/drivers/video/msm/vidc/720p/resource_tracker/vcd_res_tracker.c b/drivers/video/msm/vidc/720p/resource_tracker/vcd_res_tracker.c
index e51bf45..aee9dfe 100644
--- a/drivers/video/msm/vidc/720p/resource_tracker/vcd_res_tracker.c
+++ b/drivers/video/msm/vidc/720p/resource_tracker/vcd_res_tracker.c
@@ -677,8 +677,16 @@
 	return false;
 }
 
+static struct ion_client *res_trk_create_ion_client(void){
+	struct ion_client *video_client;
+	VCDRES_MSG_LOW("%s", __func__);
+	video_client = msm_ion_client_create(-1, "video_client");
+	return video_client;
+}
+
 void res_trk_init(struct device *device, u32 irq)
 {
+	VCDRES_MSG_LOW("%s", __func__);
 	if (resource_context.device || resource_context.irq_num ||
 		!device) {
 		VCDRES_MSG_ERROR("%s() Resource Tracker Init error\n",
@@ -695,9 +703,27 @@
 		(struct msm_vidc_platform_data *) device->platform_data;
 	if (resource_context.vidc_platform_data) {
 		resource_context.memtype =
-		resource_context.vidc_platform_data->memtype;
+			resource_context.vidc_platform_data->memtype;
+		VCDRES_MSG_LOW("%s(): resource_context.memtype = 0x%x",
+			__func__, (u32)resource_context.memtype);
+		if (resource_context.vidc_platform_data->enable_ion) {
+			resource_context.res_ion_client =
+				res_trk_create_ion_client();
+			if (!(resource_context.res_ion_client)) {
+				VCDRES_MSG_ERROR("%s()ION createfail\n",
+						__func__);
+				return;
+			}
+			VCDRES_MSG_LOW("%s(): ion_client = 0x%x", __func__,
+				(u32)resource_context.res_ion_client);
+		} else {
+			VCDRES_MSG_ERROR("%s(): ION not enabled\n",
+					__func__);
+		}
 	} else {
 		resource_context.memtype = -1;
+		VCDRES_MSG_ERROR("%s(): vidc_platform_data is NULL",
+			__func__);
 	}
 }
 
@@ -705,18 +731,23 @@
 	return resource_context.core_type;
 }
 
-u32 res_trk_get_mem_type(void){
-	return resource_context.memtype;
-}
-
 u32 res_trk_get_enable_ion(void)
 {
-	return 0;
+	if (resource_context.vidc_platform_data->enable_ion)
+		return 1;
+	else
+		return 0;
 }
 
 struct ion_client *res_trk_get_ion_client(void)
 {
-	return NULL;
+	return resource_context.res_ion_client;
+}
+
+u32 res_trk_get_mem_type(void)
+{
+	u32 mem_type = ION_HEAP(resource_context.memtype);
+	return mem_type;
 }
 
 void res_trk_set_mem_type(enum ddl_mem_area mem_type)
diff --git a/drivers/video/msm/vidc/720p/resource_tracker/vcd_res_tracker.h b/drivers/video/msm/vidc/720p/resource_tracker/vcd_res_tracker.h
index 2b92a42..f8d9053 100644
--- a/drivers/video/msm/vidc/720p/resource_tracker/vcd_res_tracker.h
+++ b/drivers/video/msm/vidc/720p/resource_tracker/vcd_res_tracker.h
@@ -13,6 +13,7 @@
 #ifndef _VIDEO_720P_RESOURCE_TRACKER_H_
 #define _VIDEO_720P_RESOURCE_TRACKER_H_
 #include <mach/board.h>
+#include <linux/ion.h>
 #include "vcd_res_tracker_api.h"
 
 #define VCD_RESTRK_MIN_PERF_LEVEL 37900
@@ -36,6 +37,8 @@
 	u32 core_type;
 	int memtype;
 	u32 secure_session;
+	struct ion_client *res_ion_client;
+	enum ddl_mem_area res_mem_type;
 };
 
 #if DEBUG
diff --git a/drivers/video/msm/vidc/common/dec/vdec.c b/drivers/video/msm/vidc/common/dec/vdec.c
index 634011b..927f19b 100644
--- a/drivers/video/msm/vidc/common/dec/vdec.c
+++ b/drivers/video/msm/vidc/common/dec/vdec.c
@@ -334,9 +334,9 @@
 		output_frame->aspect_ratio_info.aspect_ratio =
 			vcd_frame_data->aspect_ratio_info.aspect_ratio;
 		output_frame->aspect_ratio_info.par_width =
-			vcd_frame_data->aspect_ratio_info.extended_par_width;
+			vcd_frame_data->aspect_ratio_info.par_width;
 		output_frame->aspect_ratio_info.par_height =
-			vcd_frame_data->aspect_ratio_info.extended_par_height;
+			vcd_frame_data->aspect_ratio_info.par_height;
 		vdec_msg->vdec_msg_info.msgdatasize =
 		    sizeof(struct vdec_output_frameinfo);
 	} else {
@@ -915,7 +915,8 @@
 				 __func__);
 			goto import_ion_error;
 		}
-		if (res_trk_check_for_sec_session()) {
+		if (res_trk_check_for_sec_session() ||
+		   (res_trk_get_core_type() == (u32)VCD_CORE_720P)) {
 			rc = ion_phys(client_ctx->user_ion_client,
 				client_ctx->h264_mv_ion_handle,
 				(unsigned long *) (&(vcd_h264_mv_buffer->
@@ -1038,7 +1039,8 @@
 	if (!IS_ERR_OR_NULL(client_ctx->h264_mv_ion_handle)) {
 		ion_unmap_kernel(client_ctx->user_ion_client,
 					client_ctx->h264_mv_ion_handle);
-		if (!res_trk_check_for_sec_session()) {
+		if (!res_trk_check_for_sec_session() &&
+		   (res_trk_get_core_type() != (u32)VCD_CORE_720P)) {
 			ion_unmap_iommu(client_ctx->user_ion_client,
 				client_ctx->h264_mv_ion_handle,
 				VIDEO_DOMAIN,
diff --git a/drivers/video/msm/vidc/common/enc/venc_internal.c b/drivers/video/msm/vidc/common/enc/venc_internal.c
index 9450ee7..50cccbb 100644
--- a/drivers/video/msm/vidc/common/enc/venc_internal.c
+++ b/drivers/video/msm/vidc/common/enc/venc_internal.c
@@ -1844,7 +1844,8 @@
 				 __func__);
 			goto import_ion_error;
 		}
-		if (res_trk_check_for_sec_session()) {
+		if (res_trk_check_for_sec_session() ||
+		   (res_trk_get_core_type() == (u32)VCD_CORE_720P)) {
 			rc = ion_phys(client_ctx->user_ion_client,
 				client_ctx->recon_buffer_ion_handle[i],
 				&phy_addr, &ion_len);
@@ -1945,7 +1946,8 @@
 		if (client_ctx->recon_buffer_ion_handle[i]) {
 			ion_unmap_kernel(client_ctx->user_ion_client,
 				client_ctx->recon_buffer_ion_handle[i]);
-			if (!res_trk_check_for_sec_session()) {
+			if (!res_trk_check_for_sec_session() &&
+			   (res_trk_get_core_type() != (u32)VCD_CORE_720P)) {
 				ion_unmap_iommu(client_ctx->user_ion_client,
 				client_ctx->recon_buffer_ion_handle[i],
 				VIDEO_DOMAIN,
diff --git a/drivers/video/msm/vidc/common/init/vidc_init.c b/drivers/video/msm/vidc/common/init/vidc_init.c
index dcacb3c..c884cf5 100644
--- a/drivers/video/msm/vidc/common/init/vidc_init.c
+++ b/drivers/video/msm/vidc/common/init/vidc_init.c
@@ -432,7 +432,9 @@
 				ion_unmap_kernel(client_ctx->user_ion_client,
 						buf_addr_table[i].
 						buff_ion_handle);
-				if (!res_trk_check_for_sec_session()) {
+				if (!res_trk_check_for_sec_session() &&
+				   (res_trk_get_core_type() !=
+				   (u32)VCD_CORE_720P)) {
 					ion_unmap_iommu(
 						client_ctx->user_ion_client,
 						buf_addr_table[i].
@@ -456,7 +458,8 @@
 		if (!IS_ERR_OR_NULL(client_ctx->user_ion_client)) {
 			ion_unmap_kernel(client_ctx->user_ion_client,
 					client_ctx->h264_mv_ion_handle);
-			if (!res_trk_check_for_sec_session()) {
+			if (!res_trk_check_for_sec_session() &&
+			    (res_trk_get_core_type() != (u32)VCD_CORE_720P)) {
 				ion_unmap_iommu(client_ctx->user_ion_client,
 					client_ctx->h264_mv_ion_handle,
 					VIDEO_DOMAIN,
@@ -652,7 +655,8 @@
 				*kernel_vaddr = (unsigned long)NULL;
 				goto ion_free_error;
 			}
-			if (res_trk_check_for_sec_session()) {
+			if (res_trk_check_for_sec_session() ||
+			   (res_trk_get_core_type() == (u32)VCD_CORE_720P)) {
 				if (ion_phys(client_ctx->user_ion_client,
 					buff_ion_handle,
 					&phys_addr, &ion_len)) {
@@ -780,7 +784,7 @@
 		*num_of_buffers = *num_of_buffers + 1;
 		DBG("%s() : client_ctx = %p, user_virt_addr = 0x%08lx, "
 			"kernel_vaddr = 0x%08lx inserted!", __func__,
-			client_ctx, user_vaddr, *kernel_vaddr);
+			client_ctx, user_vaddr, kernel_vaddr);
 	}
 	mutex_unlock(&client_ctx->enrty_queue_lock);
 	return true;
@@ -833,7 +837,8 @@
 	if (buf_addr_table[i].buff_ion_handle) {
 		ion_unmap_kernel(client_ctx->user_ion_client,
 				buf_addr_table[i].buff_ion_handle);
-		if (!res_trk_check_for_sec_session()) {
+		if (!res_trk_check_for_sec_session() &&
+		   (res_trk_get_core_type() != (u32)VCD_CORE_720P)) {
 			ion_unmap_iommu(client_ctx->user_ion_client,
 				buf_addr_table[i].buff_ion_handle,
 				VIDEO_DOMAIN,
diff --git a/drivers/video/msm/vidc/common/vcd/vcd_sub.c b/drivers/video/msm/vidc/common/vcd/vcd_sub.c
index c11ac30..28ea453 100644
--- a/drivers/video/msm/vidc/common/vcd/vcd_sub.c
+++ b/drivers/video/msm/vidc/common/vcd/vcd_sub.c
@@ -41,6 +41,8 @@
 	unsigned long buffer_size = 0;
 	int ret = 0;
 	unsigned long ionflag = 0;
+	ion_phys_addr_t phyaddr = 0;
+	size_t len = 0;
 
 	if (!kernel_vaddr || !phy_addr || !cctxt) {
 		pr_err("\n%s: Invalid parameters", __func__);
@@ -84,6 +86,9 @@
 		}
 		*phy_addr = (u8 *) mapped_buffer->iova[0];
 		*kernel_vaddr = (u8 *) mapped_buffer->vaddr;
+		VCD_MSG_LOW("vcd_pmem_alloc: phys(0x%x), virt(0x%x), "\
+			"sz(%u), flags(0x%x)", (u32)*phy_addr,
+			(u32)*kernel_vaddr, sz, (u32)flags);
 	} else {
 		map_buffer->alloc_handle = ion_alloc(
 			    cctxt->vcd_ion_client, sz, SZ_4K,
@@ -106,7 +111,8 @@
 			pr_err("%s() ION map failed", __func__);
 			goto ion_free_bailout;
 		}
-		ret = ion_map_iommu(cctxt->vcd_ion_client,
+		if (res_trk_get_core_type() != (u32)VCD_CORE_720P) {
+			ret = ion_map_iommu(cctxt->vcd_ion_client,
 				map_buffer->alloc_handle,
 				VIDEO_DOMAIN,
 				VIDEO_MAIN_POOL,
@@ -115,18 +121,32 @@
 				(unsigned long *)&iova,
 				(unsigned long *)&buffer_size,
 				UNCACHED, 0);
-		if (ret) {
-			pr_err("%s() ION iommu map failed", __func__);
-			goto ion_map_bailout;
+			if (ret) {
+				pr_err("%s() ION iommu map failed", __func__);
+				goto ion_map_bailout;
+			}
+			map_buffer->phy_addr = iova;
+		} else {
+			ret = ion_phys(cctxt->vcd_ion_client,
+				map_buffer->alloc_handle,
+				&phyaddr,
+				&len);
+			if (ret) {
+				pr_err("%s() ion_phys failed", __func__);
+				goto ion_map_bailout;
+			}
+			map_buffer->phy_addr = phyaddr;
 		}
-		map_buffer->phy_addr = iova;
 		if (!map_buffer->phy_addr) {
 			pr_err("%s() acm alloc failed", __func__);
 			goto free_map_table;
 		}
-		*phy_addr = (u8 *)iova;
+		*phy_addr = (u8 *)map_buffer->phy_addr;
 		mapped_buffer = NULL;
 		map_buffer->mapped_buffer = NULL;
+		VCD_MSG_LOW("vcd_ion_alloc: phys(0x%x), virt(0x%x), "\
+			"sz(%u), ionflags(0x%x)", (u32)*phy_addr,
+			(u32)*kernel_vaddr, sz, (u32)ionflag);
 	}
 
 	return 0;
@@ -176,10 +196,13 @@
 	if (map_buffer->mapped_buffer)
 		msm_subsystem_unmap_buffer(map_buffer->mapped_buffer);
 	if (cctxt->vcd_enable_ion) {
+		VCD_MSG_LOW("vcd_ion_free: phys(0x%x), virt(0x%x)",
+			(u32)phy_addr, (u32)kernel_vaddr);
 		if (map_buffer->alloc_handle) {
 			ion_unmap_kernel(cctxt->vcd_ion_client,
 					map_buffer->alloc_handle);
-			ion_unmap_iommu(cctxt->vcd_ion_client,
+			if (res_trk_get_core_type() != (u32)VCD_CORE_720P)
+				ion_unmap_iommu(cctxt->vcd_ion_client,
 					map_buffer->alloc_handle,
 					VIDEO_DOMAIN,
 					VIDEO_MAIN_POOL);
@@ -187,6 +210,8 @@
 			map_buffer->alloc_handle);
 		}
 	} else {
+		VCD_MSG_LOW("vcd_pmem_free: phys(0x%x), virt(0x%x)",
+			(u32)phy_addr, (u32)kernel_vaddr);
 		free_contiguous_memory_by_paddr(
 			(unsigned long)map_buffer->phy_addr);
 	}
diff --git a/fs/yaffs2/yaffs_vfs.c b/fs/yaffs2/yaffs_vfs.c
index 8e8c55b..4dd618f 100644
--- a/fs/yaffs2/yaffs_vfs.c
+++ b/fs/yaffs2/yaffs_vfs.c
@@ -497,8 +497,16 @@
 
 	if (ret_val == YAFFS_OK) {
 		if (target) {
-			drop_nlink(new_dentry->d_inode);
-			mark_inode_dirty(new_dentry->d_inode);
+			/*
+			 * We have already identified target as
+			 * a valid directory. If that is not
+			 * the case, throw a warning.
+			 */
+			WARN_ON(!new_dentry->d_inode);
+			if (new_dentry->d_inode) {
+				drop_nlink(new_dentry->d_inode);
+				mark_inode_dirty(new_dentry->d_inode);
+			}
 		}
 
 		update_dir_time(old_dir);
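The yaffs2 change above guards a dereference that an earlier check is expected to have made safe: it warns if the invariant is broken and only then touches the inode. A minimal standalone sketch of that warn-then-guard shape is below; warn_on, inode_like and drop_target_link are invented for the example and only stand in for WARN_ON() and the inode update.

	#include <stdio.h>

	/* Stand-in for the kernel's WARN_ON(): report the broken invariant
	 * instead of crashing. */
	#define warn_on(cond) \
		do { if (cond) fprintf(stderr, "warning: %s\n", #cond); } while (0)

	struct inode_like {
		int nlink;
	};

	static void drop_target_link(struct inode_like *inode)
	{
		/* An earlier lookup is expected to have validated inode; warn
		 * if that did not hold, and only dereference when the check
		 * passes. */
		warn_on(!inode);
		if (inode)
			inode->nlink--;
	}

	int main(void)
	{
		struct inode_like inode = { .nlink = 2 };

		drop_target_link(&inode);	/* normal case: nlink becomes 1 */
		drop_target_link(NULL);		/* broken invariant: warns, no crash */
		return 0;
	}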
diff --git a/include/linux/diagchar.h b/include/linux/diagchar.h
index fbffdd2..7769950 100644
--- a/include/linux/diagchar.h
+++ b/include/linux/diagchar.h
@@ -24,6 +24,7 @@
 #define MEMORY_DEVICE_MODE		2
 #define NO_LOGGING_MODE			3
 #define UART_MODE			4
+#define SOCKET_MODE			5
 
 /* different values that go in for diag_data_type */
 #define DATA_TYPE_EVENT         	0
diff --git a/include/linux/power_supply.h b/include/linux/power_supply.h
index 32d8ec2..cb56293 100644
--- a/include/linux/power_supply.h
+++ b/include/linux/power_supply.h
@@ -210,6 +210,7 @@
 	int use_for_apm;
 };
 
+#if defined(CONFIG_POWER_SUPPLY) || defined(CONFIG_POWER_SUPPLY_MODULE)
 extern struct power_supply *power_supply_get_by_name(char *name);
 extern void power_supply_changed(struct power_supply *psy);
 extern int power_supply_am_i_supplied(struct power_supply *psy);
@@ -218,10 +219,31 @@
 extern int power_supply_set_online(struct power_supply *psy, bool enable);
 extern int power_supply_set_scope(struct power_supply *psy, int scope);
 extern int power_supply_set_charge_type(struct power_supply *psy, int type);
-
-#if defined(CONFIG_POWER_SUPPLY) || defined(CONFIG_POWER_SUPPLY_MODULE)
+extern int power_supply_set_supply_type(struct power_supply *psy,
+					enum power_supply_type supply_type);
 extern int power_supply_is_system_supplied(void);
 #else
+static inline struct power_supply *power_supply_get_by_name(char *name)
+							{ return NULL; }
+static inline int power_supply_am_i_supplied(struct power_supply *psy)
+							{ return -ENOSYS; }
+static inline int power_supply_set_battery_charged(struct power_supply *psy)
+							{ return -ENOSYS; }
+static inline int power_supply_set_current_limit(struct power_supply *psy,
+							int limit)
+							{ return -ENOSYS; }
+static inline int power_supply_set_online(struct power_supply *psy,
+							bool enable)
+							{ return -ENOSYS; }
+static inline int power_supply_set_scope(struct power_supply *psy,
+							int scope)
+							{ return -ENOSYS; }
+static inline int power_supply_set_charge_type(struct power_supply *psy,
+							int type)
+							{ return -ENOSYS; }
+static inline int power_supply_set_supply_type(struct power_supply *psy,
+					enum power_supply_type supply_type)
+							{ return -ENOSYS; }
 static inline int power_supply_is_system_supplied(void) { return -ENOSYS; }
 #endif
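The power_supply.h hunk above follows the common header pattern of pairing extern declarations with static inline stubs under the config guard, so callers build unchanged whether or not the subsystem is compiled in. A small self-contained illustration of the same pattern (HAVE_FEATURE and feature_set_level are hypothetical names, not from the header):

	#include <errno.h>
	#include <stdio.h>

	#if defined(HAVE_FEATURE)
	/* Real declaration, provided elsewhere when the feature is built in. */
	extern int feature_set_level(int level);
	#else
	/* Compile-time stub so callers build without the feature. */
	static inline int feature_set_level(int level)
	{
		(void)level;
		return -ENOSYS;
	}
	#endif

	int main(void)
	{
		int ret = feature_set_level(3);

		if (ret == -ENOSYS)
			printf("feature not built in\n");
		return 0;
	}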
 
diff --git a/include/linux/qpnp/pwm.h b/include/linux/qpnp/pwm.h
index de89a37..50c15e9 100644
--- a/include/linux/qpnp/pwm.h
+++ b/include/linux/qpnp/pwm.h
@@ -114,6 +114,18 @@
 int pwm_config_pwm_value(struct pwm_device *pwm, int pwm_value);
 
 /*
+ * enum pm_pwm_mode - PWM mode selection
+ * %PM_PWM_MODE_PWM - Select PWM mode
+ * %PM_PWM_MODE_LPG - Select LPG mode
+ */
+enum pm_pwm_mode {
+	PM_PWM_MODE_PWM,
+	PM_PWM_MODE_LPG,
+};
+
+int pwm_change_mode(struct pwm_device *pwm, enum pm_pwm_mode mode);
+
+/*
  * lut_params: Lookup table (LUT) parameters
  * @start_idx: start index in lookup table from 0 to MAX-1
  * @idx_len: number of index
@@ -134,8 +146,6 @@
 int pwm_lut_config(struct pwm_device *pwm, int period_us,
 		int duty_pct[], struct lut_params lut_params);
 
-int pwm_lut_enable(struct pwm_device *pwm, int start);
-
 /* Standard APIs supported */
 /*
  * pwm_request - request a PWM device
diff --git a/include/linux/usb/msm_hsusb.h b/include/linux/usb/msm_hsusb.h
index 3b1d06d..b9ecd60 100644
--- a/include/linux/usb/msm_hsusb.h
+++ b/include/linux/usb/msm_hsusb.h
@@ -376,6 +376,7 @@
 
 struct msm_usb_host_platform_data {
 	unsigned int power_budget;
+	int pmic_gpio_dp_irq;
 	unsigned int dock_connect_irq;
 };
 
@@ -389,21 +390,46 @@
 	bool core_clk_always_on_workaround;
 };
 
+/**
+ * struct usb_bam_pipe_connect: pipe connection information
+ * between USB/HSIC BAM and another BAM. USB/HSIC BAM can be
+ * either src BAM or dst BAM
+ * @src_phy_addr: src bam physical address.
+ * @src_pipe_index: src bam pipe index.
+ * @dst_phy_addr: dst bam physical address.
+ * @dst_pipe_index: dst bam pipe index.
+ * @data_fifo_base_offset: data fifo offset.
+ * @data_fifo_size: data fifo size.
+ * @desc_fifo_base_offset: descriptor fifo offset.
+ * @desc_fifo_size: descriptor fifo size.
+ */
 struct usb_bam_pipe_connect {
 	u32 src_phy_addr;
-	int src_pipe_index;
+	u32 src_pipe_index;
 	u32 dst_phy_addr;
-	int dst_pipe_index;
+	u32 dst_pipe_index;
 	u32 data_fifo_base_offset;
 	u32 data_fifo_size;
 	u32 desc_fifo_base_offset;
 	u32 desc_fifo_size;
 };
 
+/**
+ * struct msm_usb_bam_platform_data: pipe connection information
+ * between USB/HSIC BAM and another BAM. USB/HSIC BAM can be
+ * either src BAM or dst BAM
+ * @connections: holds all pipe connections data.
+ * @usb_active_bam: set USB or HSIC as the active BAM.
+ * @usb_bam_num_pipes: max number of pipes to use.
+ * @total_bam_num: total number of BAMs.
+ * @usb_base_address: BAM physical address.
+ */
 struct msm_usb_bam_platform_data {
 	struct usb_bam_pipe_connect *connections;
 	int usb_active_bam;
 	int usb_bam_num_pipes;
+	u32 total_bam_num;
+	u32 usb_base_address;
 };
 
 enum usb_bam {
@@ -411,8 +437,27 @@
 	HSIC_BAM,
 };
 
+#ifdef CONFIG_USB_DWC3_MSM
 int msm_ep_config(struct usb_ep *ep);
 int msm_ep_unconfig(struct usb_ep *ep);
-int msm_data_fifo_config(struct usb_ep *ep, u32 addr, u32 size);
+int msm_data_fifo_config(struct usb_ep *ep, u32 addr, u32 size,
+	u8 dst_pipe_idx);
 
+#else
+static inline int msm_data_fifo_config(struct usb_ep *ep, u32 addr, u32 size,
+	u8 dst_pipe_idx)
+{
+	return -ENODEV;
+}
+
+static inline int msm_ep_config(struct usb_ep *ep)
+{
+	return -ENODEV;
+}
+
+static inline int msm_ep_unconfig(struct usb_ep *ep)
+{
+	return -ENODEV;
+}
+#endif
 #endif
diff --git a/include/media/msm/vcd_api.h b/include/media/msm/vcd_api.h
index c93b696..7104028 100644
--- a/include/media/msm/vcd_api.h
+++ b/include/media/msm/vcd_api.h
@@ -55,8 +55,8 @@
 
 struct vcd_aspect_ratio {
 	u32 aspect_ratio;
-	u32 extended_par_width;
-	u32 extended_par_height;
+	u32 par_width;
+	u32 par_height;
 };
 
 struct vcd_frame_data {
diff --git a/include/media/msm_camera.h b/include/media/msm_camera.h
index 588fd07..11f7153 100644
--- a/include/media/msm_camera.h
+++ b/include/media/msm_camera.h
@@ -506,8 +506,7 @@
 #define AXI_CMD_RECORD       BIT(2)
 #define AXI_CMD_ZSL          BIT(3)
 #define AXI_CMD_RAW_CAPTURE  BIT(4)
-
-
+#define AXI_CMD_LIVESHOT     BIT(5)
 
 /* vfe config command: config command(from config thread)*/
 struct msm_vfe_cfg_cmd {
@@ -1765,6 +1764,7 @@
 	uint32_t capture_count;
 	uint32_t skip_abort;
 	uint16_t port_info;
+	uint32_t inst_handle;
 	uint16_t cmd_type;
 };
 
diff --git a/include/media/msm_isp.h b/include/media/msm_isp.h
index f8dbed9..9fa5932 100644
--- a/include/media/msm_isp.h
+++ b/include/media/msm_isp.h
@@ -220,6 +220,19 @@
 #define VFE_CMD_STATS_BHIST_START                       147
 #define VFE_CMD_STATS_BHIST_STOP                        148
 #define VFE_CMD_RESET_2                                 149
+#define VFE_CMD_FOV_ENC_CFG                             150
+#define VFE_CMD_FOV_VIEW_CFG                            151
+#define VFE_CMD_FOV_ENC_UPDATE                          152
+#define VFE_CMD_FOV_VIEW_UPDATE                         153
+#define VFE_CMD_SCALER_ENC_CFG                          154
+#define VFE_CMD_SCALER_VIEW_CFG                         155
+#define VFE_CMD_SCALER_ENC_UPDATE                       156
+#define VFE_CMD_SCALER_VIEW_UPDATE                      157
+#define VFE_CMD_COLORXFORM_ENC_CFG                      158
+#define VFE_CMD_COLORXFORM_VIEW_CFG                     159
+#define VFE_CMD_COLORXFORM_ENC_UPDATE                   160
+#define VFE_CMD_COLORXFORM_VIEW_UPDATE                  161
+#define VFE_CMD_TEST_GEN_CFG                            162
 
 struct msm_isp_cmd {
 	int32_t  id;
diff --git a/include/trace/events/irq.h b/include/trace/events/irq.h
index 1c09820..0296174 100644
--- a/include/trace/events/irq.h
+++ b/include/trace/events/irq.h
@@ -43,14 +43,17 @@
 	TP_STRUCT__entry(
 		__field(	int,	irq		)
 		__string(	name,	action->name	)
+		__field(	void *,	handler		)
 	),
 
 	TP_fast_assign(
 		__entry->irq = irq;
 		__assign_str(name, action->name);
+		__entry->handler = action->handler;
 	),
 
-	TP_printk("irq=%d name=%s", __entry->irq, __get_str(name))
+	TP_printk("irq=%d name=%s handler=%pf",
+		 __entry->irq, __get_str(name), __entry->handler)
 );
 
 /**
diff --git a/include/trace/events/timer.h b/include/trace/events/timer.h
index 425bcfe..dd53c79 100644
--- a/include/trace/events/timer.h
+++ b/include/trace/events/timer.h
@@ -43,15 +43,17 @@
  */
 TRACE_EVENT(timer_start,
 
-	TP_PROTO(struct timer_list *timer, unsigned long expires),
+	TP_PROTO(struct timer_list *timer,
+		 unsigned long expires, char deferrable),
 
-	TP_ARGS(timer, expires),
+	TP_ARGS(timer, expires, deferrable),
 
 	TP_STRUCT__entry(
 		__field( void *,	timer		)
 		__field( void *,	function	)
 		__field( unsigned long,	expires		)
 		__field( unsigned long,	now		)
+		__field(	char,	deferrable	)
 	),
 
 	TP_fast_assign(
@@ -59,11 +61,12 @@
 		__entry->function	= timer->function;
 		__entry->expires	= expires;
 		__entry->now		= jiffies;
+		__entry->deferrable     = deferrable;
 	),
 
-	TP_printk("timer=%p function=%pf expires=%lu [timeout=%ld]",
+	TP_printk("timer=%p function=%pf expires=%lu [timeout=%ld] defer=%c",
 		  __entry->timer, __entry->function, __entry->expires,
-		  (long)__entry->expires - __entry->now)
+		  (long)__entry->expires - __entry->now, __entry->deferrable)
 );
 
 /**
diff --git a/kernel/timer.c b/kernel/timer.c
index a297ffc..24c5d20 100644
--- a/kernel/timer.c
+++ b/kernel/timer.c
@@ -584,7 +584,8 @@
 debug_activate(struct timer_list *timer, unsigned long expires)
 {
 	debug_timer_activate(timer);
-	trace_timer_start(timer, expires);
+	trace_timer_start(timer, expires,
+			 tbase_get_deferrable(timer->base) > 0 ? 'y' : 'n');
 }
 
 static inline void debug_deactivate(struct timer_list *timer)
diff --git a/net/sched/sch_prio.c b/net/sched/sch_prio.c
index 8129d97..5e5ad91 100644
--- a/net/sched/sch_prio.c
+++ b/net/sched/sch_prio.c
@@ -18,6 +18,7 @@
 #include <linux/string.h>
 #include <linux/errno.h>
 #include <linux/skbuff.h>
+#include <linux/netdevice.h>
 #include <net/netlink.h>
 #include <net/pkt_sched.h>
 
@@ -176,6 +177,7 @@
 	struct prio_sched_data *q = qdisc_priv(sch);
 	struct tc_prio_qopt *qopt;
 	int i;
+	int flow_change = 0;
 
 	if (nla_len(opt) < sizeof(*qopt))
 		return -EINVAL;
@@ -190,7 +192,10 @@
 	}
 
 	sch_tree_lock(sch);
-	q->enable_flow = qopt->enable_flow;
+	if (q->enable_flow != qopt->enable_flow) {
+		q->enable_flow = qopt->enable_flow;
+		flow_change = 1;
+	}
 	q->bands = qopt->bands;
 	memcpy(q->prio2band, qopt->priomap, TC_PRIO_MAX+1);
 
@@ -225,6 +230,13 @@
 			}
 		}
 	}
+
+	/* Schedule qdisc when flow re-enabled */
+	if (flow_change && q->enable_flow) {
+		if (!test_bit(__QDISC_STATE_DEACTIVATED,
+			      &sch->state))
+			__netif_schedule(qdisc_root(sch));
+	}
 	return 0;
 }
 
diff --git a/sound/soc/msm/msm-pcm-afe.c b/sound/soc/msm/msm-pcm-afe.c
index 5f3cada..b7b4d51 100644
--- a/sound/soc/msm/msm-pcm-afe.c
+++ b/sound/soc/msm/msm-pcm-afe.c
@@ -140,9 +140,6 @@
 						runtime->channels * 2)));
 				pr_debug("prtd->poll_time: %d",
 						prtd->poll_time);
-				hrtimer_start(&prtd->hrt,
-					ns_to_ktime(0),
-					HRTIMER_MODE_REL);
 				break;
 			}
 			case AFE_EVENT_RTPORT_STOP:
@@ -206,9 +203,6 @@
 				snd_pcm_lib_period_bytes(prtd->substream)
 					* 1000 * 1000)/(runtime->rate
 					* runtime->channels * 2)));
-			hrtimer_start(&prtd->hrt,
-				ns_to_ktime(0),
-				HRTIMER_MODE_REL);
 			pr_debug("prtd->poll_time : %d", prtd->poll_time);
 			break;
 		}
@@ -465,6 +459,8 @@
 	case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
 		pr_debug("%s: SNDRV_PCM_TRIGGER_START\n", __func__);
 		prtd->start = 1;
+		hrtimer_start(&prtd->hrt, ns_to_ktime(0),
+					HRTIMER_MODE_REL);
 		break;
 	case SNDRV_PCM_TRIGGER_STOP:
 	case SNDRV_PCM_TRIGGER_SUSPEND:
diff --git a/sound/soc/msm/qdsp6/q6afe.c b/sound/soc/msm/qdsp6/q6afe.c
index 4c0ac9e..a4f4b60 100644
--- a/sound/soc/msm/qdsp6/q6afe.c
+++ b/sound/soc/msm/qdsp6/q6afe.c
@@ -448,7 +448,7 @@
 
 	if ((port_id == RT_PROXY_DAI_001_RX) ||
 		(port_id == RT_PROXY_DAI_002_TX))
-		return -EINVAL;
+		return 0;
 	if ((port_id == RT_PROXY_DAI_002_RX) ||
 		(port_id == RT_PROXY_DAI_001_TX))
 		port_id = VIRTUAL_ID_TO_PORTID(port_id);
@@ -608,7 +608,7 @@
 
 	if ((port_id == RT_PROXY_DAI_001_RX) ||
 		(port_id == RT_PROXY_DAI_002_TX))
-		return -EINVAL;
+		return 0;
 	if ((port_id == RT_PROXY_DAI_002_RX) ||
 		(port_id == RT_PROXY_DAI_001_TX))
 		port_id = VIRTUAL_ID_TO_PORTID(port_id);
diff --git a/sound/soc/msm/qdsp6v2/msm-dai-q6-v2.c b/sound/soc/msm/qdsp6v2/msm-dai-q6-v2.c
index 99fd1d3..485569b 100644
--- a/sound/soc/msm/qdsp6v2/msm-dai-q6-v2.c
+++ b/sound/soc/msm/qdsp6v2/msm-dai-q6-v2.c
@@ -40,7 +40,11 @@
 	union afe_port_config port_config;
 };
 
-static struct clk *pcm_clk;
+static struct clk *pcm_src_clk;
+static struct clk *pcm_branch_clk;
+static struct clk *pcm_oe_src_clk;
+static struct clk *pcm_oe_branch_clk;
+
 static DEFINE_MUTEX(aux_pcm_mutex);
 static int aux_pcm_count;
 
@@ -120,6 +124,9 @@
 	if (IS_ERR_VALUE(rc))
 		dev_err(dai->dev, "fail to close AUX PCM TX port\n");
 
+	clk_disable_unprepare(pcm_branch_clk);
+	clk_disable_unprepare(pcm_oe_branch_clk);
+
 	mutex_unlock(&aux_pcm_mutex);
 }
 
@@ -127,8 +134,11 @@
 		struct snd_soc_dai *dai)
 {
 	struct msm_dai_q6_dai_data *dai_data = dev_get_drvdata(dai->dev);
+	struct msm_dai_auxpcm_pdata *auxpcm_pdata = NULL;
 	int rc = 0;
 
+	auxpcm_pdata = dai->dev->platform_data;
+
 	mutex_lock(&aux_pcm_mutex);
 
 	if (aux_pcm_count == 2) {
@@ -170,12 +180,37 @@
 	 * assert/deassert and afe_open sequence is not followed.
 	 */
 
+	rc = clk_set_rate(pcm_src_clk, auxpcm_pdata->pcm_clk_rate);
+	if (rc < 0) {
+		pr_err("%s: clk_set_rate failed\n", __func__);
+		goto fail;
+	}
+
+	rc = clk_prepare_enable(pcm_branch_clk);
+	if (rc) {
+		pr_err("%s: clk enable failed\n", __func__);
+		goto fail;
+	}
+
+	rc = clk_set_rate(pcm_oe_src_clk, 24576000>>1);
+	if (rc < 0) {
+		pr_err("%s: clk_set_rate on pcm oe failed\n", __func__);
+		goto fail;
+	}
+
+	rc = clk_prepare_enable(pcm_oe_branch_clk);
+	if (rc) {
+		pr_err("%s: clk enable pcm_oe_branch_clk failed\n", __func__);
+		goto fail;
+	}
+
 	afe_open(PCM_RX, &dai_data->port_config, dai_data->rate);
 
 	afe_open(PCM_TX, &dai_data->port_config, dai_data->rate);
 
+fail:
 	mutex_unlock(&aux_pcm_mutex);
 
 	return rc;
 }
 
@@ -217,6 +252,7 @@
 	auxpcm_pdata = (struct msm_dai_auxpcm_pdata *)
 					dev_get_drvdata(dai->dev);
 	dai->dev->platform_data = auxpcm_pdata;
+	dai->id = dai->dev->id;
 
 	mutex_lock(&aux_pcm_mutex);
 
@@ -225,9 +261,41 @@
 	 * data to the cpu driver, since the cpu driver is unaware of any
 	 * board specific configuration.
 	 */
-	if (!pcm_clk)
-		pcm_clk = clk_get(dai->dev, auxpcm_pdata->clk);
+	if ((!pcm_src_clk) || (!pcm_branch_clk)) {
+		pcm_src_clk = clk_get(dai->dev, auxpcm_pdata->clk);
 
+		if (IS_ERR(pcm_src_clk)) {
+			pr_err("%s: could not get pcm_src_clk\n", __func__);
+			pcm_src_clk = NULL;
+			mutex_unlock(&aux_pcm_mutex);
+			return -ENODEV;
+		}
+
+		pcm_branch_clk = clk_get(dai->dev, "ibit_clk");
+
+		if (IS_ERR(pcm_branch_clk)) {
+			pr_err("%s: could not get pcm_branch_clk\n", __func__);
+			pcm_branch_clk = NULL;
+			mutex_unlock(&aux_pcm_mutex);
+			return -ENODEV;
+		}
+	}
+
+	if ((!pcm_oe_src_clk) || (!pcm_oe_branch_clk)) {
+
+		pcm_oe_src_clk = clk_get(dai->dev, "core_oe_src_clk");
+
+		if (IS_ERR(pcm_oe_src_clk)) {
+			pr_err("%s: could not get pcm_oe_src_clk\n", __func__);
+			pcm_oe_src_clk = NULL;
+			mutex_unlock(&aux_pcm_mutex);
+			return -ENODEV;
+		}
+
+		pcm_oe_branch_clk = clk_get(dai->dev, "core_oe_clk");
+		if (IS_ERR(pcm_oe_branch_clk)) {
+			pr_err("%s: could not get pcm_oe_clk\n", __func__);
+			pcm_oe_branch_clk = NULL;
+			mutex_unlock(&aux_pcm_mutex);
+			return -ENODEV;
+		}
+	}
 	mutex_unlock(&aux_pcm_mutex);
 
 	dai_data = kzalloc(sizeof(struct msm_dai_q6_dai_data), GFP_KERNEL);
@@ -813,7 +881,7 @@
 	.remove = msm_dai_q6_dai_remove,
 };
 
-static int msm_auxpcm_dev_probe(struct platform_device *pdev)
+static int __devinit msm_auxpcm_dev_probe(struct platform_device *pdev)
 {
 	int id;
 	void *plat_data;
@@ -837,6 +905,7 @@
 	dev_dbg(&pdev->dev, "dev name %s\n", dev_name(&pdev->dev));
 
 	dev_set_drvdata(&pdev->dev, plat_data);
+	pdev->dev.id = id;
 
 	switch (id) {
 	case AFE_PORT_ID_PRIMARY_PCM_RX:
@@ -855,7 +924,7 @@
 	return rc;
 }
 
-static int msm_auxpcm_resource_probe(
+static int __devinit msm_auxpcm_resource_probe(
 			struct platform_device *pdev)
 {
 	int rc = 0;
@@ -950,13 +1019,13 @@
 	return rc;
 }
 
-static int msm_auxpcm_dev_remove(struct platform_device *pdev)
+static int __devexit msm_auxpcm_dev_remove(struct platform_device *pdev)
 {
 	snd_soc_unregister_dai(&pdev->dev);
 	return 0;
 }
 
-static int msm_auxpcm_resource_remove(
+static int __devexit msm_auxpcm_resource_remove(
 				struct platform_device *pdev)
 {
 	void *auxpcm_pdata;
@@ -967,22 +1036,20 @@
 	return 0;
 }
 
-static const struct of_device_id msm_auxpcm_resource_dt_match[] = {
+static struct of_device_id msm_auxpcm_resource_dt_match[] = {
 	{ .compatible = "qcom,msm-auxpcm-resource", },
 	{}
 };
-MODULE_DEVICE_TABLE(of, msm_auxpcm_resource_dt_match);
 
-static const struct of_device_id msm_auxpcm_dev_dt_match[] = {
+static struct of_device_id msm_auxpcm_dev_dt_match[] = {
 	{ .compatible = "qcom,msm-auxpcm-dev", },
 	{}
 };
-MODULE_DEVICE_TABLE(of, msm_auxpcm_dev_dt_match);
 
 
-static struct platform_driver msm_auxpcm_dev = {
+static struct platform_driver msm_auxpcm_dev_driver = {
 	.probe  = msm_auxpcm_dev_probe,
-	.remove = msm_auxpcm_dev_remove,
+	.remove = __devexit_p(msm_auxpcm_dev_remove),
 	.driver = {
 		.name = "msm-auxpcm-dev",
 		.owner = THIS_MODULE,
@@ -990,9 +1057,9 @@
 	},
 };
 
-static struct platform_driver msm_auxpcm_resource = {
+static struct platform_driver msm_auxpcm_resource_driver = {
 	.probe  = msm_auxpcm_resource_probe,
-	.remove  = msm_auxpcm_resource_remove,
+	.remove  = __devexit_p(msm_auxpcm_resource_remove),
 	.driver = {
 		.name = "msm-auxpcm-resource",
 		.owner = THIS_MODULE,
@@ -1134,22 +1201,23 @@
 {
 	int rc;
 
-	rc = platform_driver_register(&msm_auxpcm_dev);
+	rc = platform_driver_register(&msm_auxpcm_dev_driver);
 	if (rc)
 		goto fail;
 
-	rc = platform_driver_register(&msm_auxpcm_resource);
+	rc = platform_driver_register(&msm_auxpcm_resource_driver);
+
 	if (rc) {
 		pr_err("%s: fail to register cpu dai driver\n", __func__);
-		platform_driver_unregister(&msm_auxpcm_dev);
+		platform_driver_unregister(&msm_auxpcm_dev_driver);
 		goto fail;
 	}
 
 	rc = platform_driver_register(&msm_dai_q6);
 	if (rc) {
 		pr_err("%s: fail to register dai q6 driver", __func__);
-		platform_driver_unregister(&msm_auxpcm_dev);
-		platform_driver_unregister(&msm_auxpcm_resource);
+		platform_driver_unregister(&msm_auxpcm_dev_driver);
+		platform_driver_unregister(&msm_auxpcm_resource_driver);
 		goto fail;
 	}
 
@@ -1157,8 +1225,8 @@
 	if (rc) {
 		pr_err("%s: fail to register dai q6 dev driver", __func__);
 		platform_driver_unregister(&msm_dai_q6);
-		platform_driver_unregister(&msm_auxpcm_dev);
-		platform_driver_unregister(&msm_auxpcm_resource);
+		platform_driver_unregister(&msm_auxpcm_dev_driver);
+		platform_driver_unregister(&msm_auxpcm_resource_driver);
 		goto fail;
 	}
 fail:
@@ -1170,8 +1238,8 @@
 {
 	platform_driver_unregister(&msm_dai_q6_dev);
 	platform_driver_unregister(&msm_dai_q6);
-	platform_driver_unregister(&msm_auxpcm_dev);
-	platform_driver_unregister(&msm_auxpcm_resource);
+	platform_driver_unregister(&msm_auxpcm_dev_driver);
+	platform_driver_unregister(&msm_auxpcm_resource_driver);
 }
 module_exit(msm_dai_q6_exit);
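The renamed driver registration in the module init above also shows the register-with-rollback shape: each later failure unregisters the drivers that were already registered before returning. A standalone sketch of that shape (register_a/b/c and the unregister helpers are placeholders, not the platform drivers above):

	#include <stdio.h>

	/* Placeholders standing in for platform_driver_register()/unregister(). */
	static int register_a(void) { return 0; }
	static int register_b(void) { return 0; }
	static int register_c(void) { return -1; }	/* simulate a failure */
	static void unregister_a(void) { puts("unregister a"); }
	static void unregister_b(void) { puts("unregister b"); }

	static int init_all(void)
	{
		int rc;

		rc = register_a();
		if (rc)
			return rc;

		rc = register_b();
		if (rc) {
			unregister_a();		/* roll back the first step */
			return rc;
		}

		rc = register_c();
		if (rc) {
			/* roll back everything registered so far */
			unregister_b();
			unregister_a();
			return rc;
		}
		return 0;
	}

	int main(void)
	{
		return init_all() ? 1 : 0;
	}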